#! /usr/bin/env perl
# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components,
# in this context parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on a combination of Intel submissions,
# [1] and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with a notable relative improvement, achieving 1.0 cycle
# per byte processed with a 128-bit key on Haswell, 0.74 on Broadwell,
# 0.63 on Skylake... [The quoted results are raw profiled measurements
# for a favourable packet size, one divisible by 96. Applications
# using the EVP interface will observe a few percent worse
# performance.]
#
# Knights Landing processes 1 byte in 1.25 cycles (measured with EVP).
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
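#
# A sketch of the math that makes the stitch possible: GHASH admits an
# aggregated-reduction form. With hash key H, six fresh ciphertext
# blocks C[0..5], and addition in GF(2^128) being XOR,
#
#	Xi <- ((Xi + C[0])*H^6 + C[1]*H^5 + ... + C[5]*H) mod P
#
# so the six PCLMULQDQ multiplications by precomputed powers of H
# (the Htbl part of the Xip structure below) are mutually independent
# and can be interleaved with the AESENC rounds that encrypt the next
# six counter blocks.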

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.20) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");

$code=<<___;
.text

.type	_aesni_ctr32_ghash_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_ghash_6x:
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	sub	\$6,$len
	vpxor	$Z0,$Z0,$Z0		# $Z0 = 0
	vmovdqu	0x00-0x80($key),$rndkey
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpaddb	$T2,$inout2,$inout3
	vpaddb	$T2,$inout3,$inout4
	vpaddb	$T2,$inout4,$inout5
	vpxor	$rndkey,$T1,$inout0
	vmovdqu	$Z0,16+8(%rsp)		# "$Z3" = 0
	jmp	.Loop6x

.align	32
.Loop6x:
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb	$T2,$inout5,$T1		# next counter value
	vpxor	$rndkey,$inout1,$inout1
	vpxor	$rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu	$T1,($ivp)		# save next counter value
	vpclmulqdq	\$0x10,$Hkey,$Z3,$Z1
	vpxor	$rndkey,$inout3,$inout3
	vmovups	0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq	\$0x01,$Hkey,$Z3,$Z2
	xor	%r12,%r12
	cmp	$in0,$end0
	vaesenc	$T2,$inout0,$inout0
	vmovdqu	0x30+8(%rsp),$Ii	# I[4]
	vpxor	$rndkey,$inout4,$inout4
	vpclmulqdq	\$0x00,$Hkey,$Z3,$T1
	vaesenc	$T2,$inout1,$inout1
	vpxor	$rndkey,$inout5,$inout5
	setnc	%r12b
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vaesenc	$T2,$inout2,$inout2
	vmovdqu	0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg	%r12
	vaesenc	$T2,$inout3,$inout3
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Z1
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc	$T2,$inout4,$inout4
	vpxor	$Z1,$T1,$Z0
	and	\$0x60,%r12
	vmovups	0x20-0x80($key),$rndkey
	vpclmulqdq	\$0x10,$Hkey,$Ii,$T1
	vaesenc	$T2,$inout5,$inout5
	vpclmulqdq	\$0x01,$Hkey,$Ii,$T2
	lea	($in0,%r12),$in0
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Hkey
	vmovdqu	0x40+8(%rsp),$Ii	# I[3]
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x58($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x50($in0),%r12
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x20+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x28+8(%rsp)
	vmovdqu	0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x30-0x80($key),$rndkey
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Z1,$Ii,$T1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x10,$Z1,$Ii,$T2
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x01,$Z1,$Ii,$Hkey
	vaesenc	$rndkey,$inout2,$inout2
	vpclmulqdq	\$0x11,$Z1,$Ii,$Z1
	vmovdqu	0x50+8(%rsp),$Ii	# I[2]
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	$T1,$Z0,$Z0
	vmovdqu	0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x40-0x80($key),$rndkey
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x00,$T1,$Ii,$T2
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x10,$T1,$Ii,$Hkey
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x48($in0),%r13
	vpxor	$Z1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T1,$Ii,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x40($in0),%r12
	vpclmulqdq	\$0x11,$T1,$Ii,$T1
	vmovdqu	0x60+8(%rsp),$Ii	# I[1]
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x30+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x38+8(%rsp)
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x50-0x80($key),$rndkey
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x00,$T2,$Ii,$Hkey
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$T2,$Ii,$Z1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x38($in0),%r13
	vpxor	$T1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T2,$Ii,$T1
	vpxor	0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x30($in0),%r12
	vpclmulqdq	\$0x11,$T2,$Ii,$T2
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x40+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x48+8(%rsp)
	vpxor	$Hkey,$Z0,$Z0
	vmovdqu	0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	0x60-0x80($key),$rndkey
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Z1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x01,$Hkey,$Xi,$T1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x28($in0),%r13
	vpxor	$T2,$Z3,$Z3
	vpclmulqdq	\$0x00,$Hkey,$Xi,$T2
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x20($in0),%r12
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xi
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x50+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x58+8(%rsp)
	vpxor	$Z1,$Z2,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	$T1,$Z2,$Z2
	vmovups	0x70-0x80($key),$rndkey
	vpslldq	\$8,$Z2,$Z1
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Xi,$Z3,$Z3
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Z1,$Z0,$Z0
	movbe	0x18($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x10($in0),%r12
	vpalignr	\$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	mov	%r13,0x60+8(%rsp)
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r12,0x68+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	vmovups	0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vmovups	0x90-0x80($key),$rndkey
	vaesenc	$T1,$inout1,$inout1
	vpsrldq	\$8,$Z2,$Z2
	vaesenc	$T1,$inout2,$inout2
	vpxor	$Z2,$Z3,$Z3
	vaesenc	$T1,$inout3,$inout3
	vpxor	$Ii,$Z0,$Z0
	movbe	0x08($in0),%r13
	vaesenc	$T1,$inout4,$inout4
	movbe	0x00($in0),%r12
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xa0-0x80($key),$T1
	cmp	\$11,$rounds
	jb	.Lenc_tail		# 128-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xb0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xc0-0x80($key),$T1
	je	.Lenc_tail		# 192-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xd0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xe0-0x80($key),$T1
	jmp	.Lenc_tail		# 256-bit key

.align	32
.Lhandle_ctr32:
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$rndkey,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$rndkey,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpshufb	$Ii,$inout5,$inout5
	vpshufb	$Ii,$T1,$T1		# next counter value
	jmp	.Lresume_ctr32

.align	32
.Lenc_tail:
	vaesenc	$rndkey,$inout0,$inout0
	vmovdqu	$Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr	\$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc	$rndkey,$inout1,$inout1
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	vpxor	0x00($inp),$T1,$T2
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x10($inp),$T1,$Ii
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x20($inp),$T1,$Z1
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x30($inp),$T1,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x40($inp),$T1,$Z3
	vpxor	0x50($inp),$T1,$Hkey
	vmovdqu	($ivp),$T1		# load next counter value

	vaesenclast	$T2,$inout0,$inout0
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast	$Ii,$inout1,$inout1
	vpaddb	$T2,$T1,$Ii
	mov	%r13,0x70+8(%rsp)
	lea	0x60($inp),$inp
	vaesenclast	$Z1,$inout2,$inout2
	vpaddb	$T2,$Ii,$Z1
	mov	%r12,0x78+8(%rsp)
	lea	0x60($out),$out
	vmovdqu	0x00-0x80($key),$rndkey
	vaesenclast	$Z2,$inout3,$inout3
	vpaddb	$T2,$Z1,$Z2
	vaesenclast	$Z3,$inout4,$inout4
	vpaddb	$T2,$Z2,$Z3
	vaesenclast	$Hkey,$inout5,$inout5
	vpaddb	$T2,$Z3,$Hkey

	add	\$0x60,$ret
	sub	\$0x6,$len
	jc	.L6x_done

	vmovups	$inout0,-0x60($out)	# save output
	vpxor	$rndkey,$T1,$inout0
	vmovups	$inout1,-0x50($out)
	vmovdqa	$Ii,$inout1		# 0 latency
	vmovups	$inout2,-0x40($out)
	vmovdqa	$Z1,$inout2		# 0 latency
	vmovups	$inout3,-0x30($out)
	vmovdqa	$Z2,$inout3		# 0 latency
	vmovups	$inout4,-0x20($out)
	vmovdqa	$Z3,$inout4		# 0 latency
	vmovups	$inout5,-0x10($out)
	vmovdqa	$Hkey,$inout5		# 0 latency
	vmovdqu	0x20+8(%rsp),$Z3	# I[5]
	jmp	.Loop6x

.L6x_done:
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled

	ret
.size	_aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16],
#		struct { u128 Xi,H,Htbl[9]; } *Xip);
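#
# Both entry points return, in %rax, the number of input bytes actually
# processed (0 if len is below the minimal accepted length), and they
# update iv[16] in place with the next counter block. A rough
# caller-side sketch (the variable names are illustrative only; the
# actual callers live elsewhere in OpenSSL):
#
#	size_t done = aesni_gcm_encrypt(in, out, len, &aes_key, iv, &gcm);
#	/* 'done' bytes were encrypted and folded into gcm.Xi; the
#	   caller must handle the remaining len-done bytes itself. */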
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@function,6
.align	32
aesni_gcm_decrypt:
.cfi_startproc
	xor	$ret,$ret
	cmp	\$0x60,$len		# minimal accepted length
	jb	.Lgcm_dec_abort

	lea	(%rsp),%rax		# save stack pointer
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	vmovdqu	($Xip),$Xi		# load Xi
	and	\$-128,%rsp		# ensure stack alignment
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea	0x80($key),$key		# size optimization
	lea	0x20+0x20($Xip),$Xip	# size optimization
	mov	0xf0-0x80($key),$rounds
	vpshufb	$Ii,$Xi,$Xi

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Ldec_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Ldec_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Ldec_no_key_aliasing:

	vmovdqu	0x50($inp),$Z3		# I[5]
	lea	($inp),$in0
	vmovdqu	0x40($inp),$Z0
	lea	-0xc0($inp,$len),$end0
	vmovdqu	0x30($inp),$Z1
	shr	\$4,$len
	xor	$ret,$ret
	vmovdqu	0x20($inp),$Z2
	vpshufb	$Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	0x10($inp),$T2
	vpshufb	$Ii,$Z0,$Z0
	vmovdqu	($inp),$Hkey
	vpshufb	$Ii,$Z1,$Z1
	vmovdqu	$Z0,0x30(%rsp)
	vpshufb	$Ii,$Z2,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$T2,$T2
	vmovdqu	$Z2,0x50(%rsp)
	vpshufb	$Ii,$Hkey,$Hkey
	vmovdqu	$T2,0x60(%rsp)
	vmovdqu	$Hkey,0x70(%rsp)

	call	_aesni_ctr32_ghash_6x

	vmovups	$inout0,-0x60($out)	# save output
	vmovups	$inout1,-0x50($out)
	vmovups	$inout2,-0x40($out)
	vmovups	$inout3,-0x30($out)
	vmovups	$inout4,-0x20($out)
	vmovups	$inout5,-0x10($out)

	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
.cfi_restore	%r15
	mov	-40(%rax),%r14
.cfi_restore	%r14
	mov	-32(%rax),%r13
.cfi_restore	%r13
	mov	-24(%rax),%r12
.cfi_restore	%r12
	mov	-16(%rax),%rbp
.cfi_restore	%rbp
	mov	-8(%rax),%rbx
.cfi_restore	%rbx
	lea	(%rax),%rsp		# restore %rsp
.cfi_def_cfa_register	%rsp
.Lgcm_dec_abort:
	mov	$ret,%rax		# return value
	ret
.cfi_endproc
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
$code.=<<___;
.type	_aesni_ctr32_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_6x:
	vmovdqu	0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	lea	-1($rounds),%r13
	vmovups	0x10-0x80($key),$rndkey
	lea	0x20-0x80($key),%r12
	vpxor	$Z0,$T1,$inout0
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32_2
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddb	$T2,$inout2,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddb	$T2,$inout3,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpaddb	$T2,$inout4,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpaddb	$T2,$inout5,$T1
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32

.align	16
.Loop_ctr32:
	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	(%r12),$rndkey
	lea	0x10(%r12),%r12
	dec	%r13d
	jnz	.Loop_ctr32

	vmovdqu	(%r12),$Hkey		# last round key
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	0x00($inp),$Hkey,$Z0
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	0x10($inp),$Hkey,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x20($inp),$Hkey,$Z2
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x30($inp),$Hkey,$Xi
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x40($inp),$Hkey,$T2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x50($inp),$Hkey,$Hkey
	lea	0x60($inp),$inp

	vaesenclast	$Z0,$inout0,$inout0
	vaesenclast	$Z1,$inout1,$inout1
	vaesenclast	$Z2,$inout2,$inout2
	vaesenclast	$Xi,$inout3,$inout3
	vaesenclast	$T2,$inout4,$inout4
	vaesenclast	$Hkey,$inout5,$inout5
	vmovups	$inout0,0x00($out)
	vmovups	$inout1,0x10($out)
	vmovups	$inout2,0x20($out)
	vmovups	$inout3,0x30($out)
	vmovups	$inout4,0x40($out)
	vmovups	$inout5,0x50($out)
	lea	0x60($out),$out

	ret
.align	32
.Lhandle_ctr32_2:
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpshufb	$Ii,$inout5,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpshufb	$Ii,$T1,$T1		# next counter value
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32
.size	_aesni_ctr32_6x,.-_aesni_ctr32_6x

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@function,6
.align	32
aesni_gcm_encrypt:
.cfi_startproc
	xor	$ret,$ret
	cmp	\$0x60*3,$len		# minimal accepted length
	jb	.Lgcm_enc_abort

	lea	(%rsp),%rax		# save stack pointer
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	lea	0x80($key),$key		# size optimization
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	and	\$-128,%rsp		# ensure stack alignment
	mov	0xf0-0x80($key),$rounds

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Lenc_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Lenc_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea	($out),$in0
	lea	-0xc0($out,$len),$end0
	shr	\$4,$len

	call	_aesni_ctr32_6x
	vpshufb	$Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb	$Ii,$inout1,$T2
	vmovdqu	$Xi,0x70(%rsp)
	vpshufb	$Ii,$inout2,$Z0
	vmovdqu	$T2,0x60(%rsp)
	vpshufb	$Ii,$inout3,$Z1
	vmovdqu	$Z0,0x50(%rsp)
	vpshufb	$Ii,$inout4,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	$Z2,0x30(%rsp)

	call	_aesni_ctr32_6x

	vmovdqu	($Xip),$Xi		# load Xi
	lea	0x20+0x20($Xip),$Xip	# size optimization
	sub	\$12,$len
	mov	\$0x60*2,$ret
	vpshufb	$Ii,$Xi,$Xi

	call	_aesni_ctr32_ghash_6x
	vmovdqu	0x20(%rsp),$Z3		# I[5]
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$Z3,$Z3,$T1
	vmovdqu	0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups	$inout0,-0x60($out)	# save output
	vpshufb	$Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor	$Z3,$T1,$T1
	vmovups	$inout1,-0x50($out)
	vpshufb	$Ii,$inout1,$inout1
	vmovups	$inout2,-0x40($out)
	vpshufb	$Ii,$inout2,$inout2
	vmovups	$inout3,-0x30($out)
	vpshufb	$Ii,$inout3,$inout3
	vmovups	$inout4,-0x20($out)
	vpshufb	$Ii,$inout4,$inout4
	vmovups	$inout5,-0x10($out)
	vpshufb	$Ii,$inout5,$inout5
	vmovdqu	$inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);
$code.=<<___;
	vmovdqu	0x30(%rsp),$Z2		# I[4]
	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq	$Z2,$Z2,$T2
	vpclmulqdq	\$0x00,$Hkey,$Z3,$Z1
	vpxor	$Z2,$T2,$T2
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x00,$HK,$T1,$T1

	vmovdqu	0x40(%rsp),$T3		# I[3]
	vpclmulqdq	\$0x00,$Ii,$Z2,$Z0
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$T3,$T3,$Z1
	vpclmulqdq	\$0x11,$Ii,$Z2,$Z2
	vpxor	$T3,$Z1,$Z1
	vpxor	$Z3,$Z2,$Z2
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vmovdqu	0x50(%rsp),$T1		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$T3,$Z3
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z0,$Z3,$Z3
	vpunpckhqdq	$T1,$T1,$Z0
	vpclmulqdq	\$0x11,$Hkey,$T3,$T3
	vpxor	$T1,$Z0,$Z0
	vpxor	$Z2,$T3,$T3
	vpclmulqdq	\$0x00,$HK,$Z1,$Z1
	vpxor	$T2,$Z1,$Z1

	vmovdqu	0x60(%rsp),$T2		# I[1]
	vpclmulqdq	\$0x00,$Ii,$T1,$Z2
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z3,$Z2,$Z2
	vpunpckhqdq	$T2,$T2,$Z3
	vpclmulqdq	\$0x11,$Ii,$T1,$T1
	vpxor	$T2,$Z3,$Z3
	vpxor	$T3,$T1,$T1
	vpclmulqdq	\$0x10,$HK,$Z0,$Z0
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$Z1,$Z0,$Z0

	vpxor	0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq	\$0x00,$Hkey,$T2,$Z1
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq	$Xi,$Xi,$T3
	vpxor	$Z2,$Z1,$Z1
	vpclmulqdq	\$0x11,$Hkey,$T2,$T2
	vpxor	$Xi,$T3,$T3
	vpxor	$T1,$T2,$T2
	vpclmulqdq	\$0x00,$HK,$Z3,$Z3
	vpxor	$Z0,$Z3,$Z0

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$inout5,$inout5,$T1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Xi
	vpxor	$inout5,$T1,$T1
	vpxor	$Z1,$Z2,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$T3
	vmovdqu	0x20-0x20($Xip),$HK
	vpxor	$T2,$Xi,$Z3
	vpxor	$Z0,$T3,$Z2

	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor	$Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$inout5,$Z0
	vpxor	$T3,$Z2,$Z2
	vpunpckhqdq	$inout4,$inout4,$T2
	vpclmulqdq	\$0x11,$Hkey,$inout5,$inout5
	vpxor	$inout4,$T2,$T2
	vpslldq	\$8,$Z2,$T3
	vpclmulqdq	\$0x00,$HK,$T1,$T1
	vpxor	$T3,$Z1,$Xi
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$Z2,$Z3,$Z3

	vpclmulqdq	\$0x00,$Ii,$inout4,$Z1
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout3,$inout3,$T3
	vpclmulqdq	\$0x11,$Ii,$inout4,$inout4
	vpxor	$inout3,$T3,$T3
	vpxor	$inout5,$inout4,$inout4
	vpalignr	\$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Hkey,$inout3,$Z0
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$inout2,$inout2,$T1
	vpclmulqdq	\$0x11,$Hkey,$inout3,$inout3
	vpxor	$inout2,$T1,$T1
	vpxor	$inout4,$inout3,$inout3
	vxorps	0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq	\$0x00,$HK,$T3,$T3
	vpxor	$T2,$T3,$T3
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Ii,$inout2,$Z1
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout1,$inout1,$T2
	vpclmulqdq	\$0x11,$Ii,$inout2,$inout2
	vpxor	$inout1,$T2,$T2
	vpalignr	\$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor	$inout3,$inout2,$inout2
	vpclmulqdq	\$0x10,$HK,$T1,$T1
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$T3,$T1,$T1
	vxorps	$Z3,$inout5,$inout5
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Hkey,$inout1,$Z0
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$Xi,$Xi,$T3
	vpclmulqdq	\$0x11,$Hkey,$inout1,$inout1
	vpxor	$Xi,$T3,$T3
	vpxor	$inout2,$inout1,$inout1
	vpclmulqdq	\$0x00,$HK,$T2,$T2
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Z3
	vpxor	$Z0,$Z1,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$Z2
	vpxor	$inout1,$Z3,$Z3
	vpxor	$T2,$Z2,$Z2

	vpxor	$Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor	$Z0,$Z2,$Z2
	vpslldq	\$8,$Z2,$T1
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$T1,$Z1,$Xi
	vpxor	$Z2,$Z3,$Z3

	vpalignr	\$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpalignr	\$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$Z3,$T2,$T2
	vpxor	$T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
.cfi_restore	%r15
	mov	-40(%rax),%r14
.cfi_restore	%r14
	mov	-32(%rax),%r13
.cfi_restore	%r13
	mov	-24(%rax),%r12
.cfi_restore	%r12
	mov	-16(%rax),%rbp
.cfi_restore	%rbp
	mov	-8(%rax),%rbx
.cfi_restore	%rbx
	lea	(%rax),%rsp		# restore %rsp
.cfi_def_cfa_register	%rsp
.Lgcm_enc_abort:
	mov	$ret,%rax		# return value
	ret
.cfi_endproc
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt
___

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte	2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.asciz	"AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	gcm_se_handler,\@abi-omnipotent
.align	16
gcm_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	120($context),%rax	# pull context->Rax

	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	mov	%r15,240($context)
	mov	%r14,232($context)
	mov	%r13,224($context)
	mov	%r12,216($context)
	mov	%rbp,160($context)
	mov	%rbx,144($context)

	lea	-0xd8(%rax),%rsi	# %xmm save area
	lea	512($context),%rdi	# & context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	gcm_se_handler,.-gcm_se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_aesni_gcm_decrypt
	.rva	.LSEH_end_aesni_gcm_decrypt
	.rva	.LSEH_gcm_dec_info

	.rva	.LSEH_begin_aesni_gcm_encrypt
	.rva	.LSEH_end_aesni_gcm_encrypt
	.rva	.LSEH_gcm_enc_info

.section	.xdata
.align	8
.LSEH_gcm_dec_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
$code=<<___;	# assembler is too old
.text

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;