# aesni-sha1-x86_64.pl
  1. #!/usr/bin/env perl
  2. #
  3. # ====================================================================
  4. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  5. # project. The module is, however, dual licensed under OpenSSL and
  6. # CRYPTOGAMS licenses depending on where you obtain it. For further
  7. # details see http://www.openssl.org/~appro/cryptogams/.
  8. # ====================================================================
  9. #
  10. # June 2011
  11. #
  12. # This is AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
  13. # in http://download.intel.com/design/intarch/papers/323686.pdf, is
  14. # that since AESNI-CBC encrypt exhibit *very* low instruction-level
  15. # parallelism, interleaving it with another algorithm would allow to
  16. # utilize processor resources better and achieve better performance.
  17. # SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and
  18. # AESNI code is weaved into it. Below are performance numbers in
  19. # cycles per processed byte, less is better, for standalone AESNI-CBC
  20. # encrypt, sum of the latter and standalone SHA1, and "stitched"
  21. # subroutine:
  22. #
  23. # AES-128-CBC +SHA1 stitch gain
  24. # Westmere 3.77[+5.6] 9.37 6.65 +41%
  25. # Sandy Bridge 5.05[+5.2(6.3)] 10.25(11.35) 6.16(7.08) +67%(+60%)
  26. #
  27. # AES-192-CBC
  28. # Westmere 4.51 10.11 6.97 +45%
  29. # Sandy Bridge 6.05 11.25(12.35) 6.34(7.27) +77%(+70%)
  30. #
  31. # AES-256-CBC
  32. # Westmere 5.25 10.85 7.25 +50%
  33. # Sandy Bridge 7.05 12.25(13.35) 7.06(7.70) +74%(+73%)
  34. #
  35. # (*) There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
  36. # background information. Above numbers in parentheses are SSSE3
  37. # results collected on AVX-capable CPU, i.e. apply on OSes that
  38. # don't support AVX.
  39. #
  40. # Needless to mention that it makes no sense to implement "stitched"
  41. # *decrypt* subroutine. Because *both* AESNI-CBC decrypt and SHA1
  42. # fully utilize parallelism, so stitching would not give any gain
  43. # anyway. Well, there might be some, e.g. because of better cache
  44. # locality... For reference, here are performance results for
  45. # standalone AESNI-CBC decrypt:
  46. #
  47. # AES-128-CBC AES-192-CBC AES-256-CBC
  48. # Westmere 1.31 1.55 1.80
  49. # Sandy Bridge 0.93 1.06 1.22
  50. $flavour = shift;
  51. $output = shift;
  52. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  53. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  54. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  55. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  56. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  57. die "can't locate x86_64-xlate.pl";
  58. $avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  59. =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
  60. $1>=2.19);
  61. $avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  62. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
  63. $1>=2.09);
  64. $avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  65. `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
  66. $1>=10);
  67. open STDOUT,"| $^X $xlate $flavour $output";
# C prototype of the exported entry point:
#
# void aesni_cbc_sha1_enc(const void *inp,
#			void *out,
#			size_t length,
#			const AES_KEY *key,
#			unsigned char *iv,
#			SHA_CTX *ctx,
#			const void *in0);

# Dispatcher: picks the AVX path when both the AVX bit and the "Intel CPU"
# bit are set in OPENSSL_ia32cap_P, otherwise falls through to SSSE3.
$code.=<<___;
.text
.extern	OPENSSL_ia32cap_P
.globl	aesni_cbc_sha1_enc
.type	aesni_cbc_sha1_enc,\@abi-omnipotent
.align	16
aesni_cbc_sha1_enc:
	# caller should check for SSSE3 and AES-NI bits
	mov	OPENSSL_ia32cap_P+0(%rip),%r10d
	mov	OPENSSL_ia32cap_P+4(%rip),%r11d
___
$code.=<<___ if ($avx);
	and	\$`1<<28`,%r11d		# mask AVX bit
	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
	or	%r11d,%r10d
	cmp	\$`1<<28|1<<30`,%r10d
	je	aesni_cbc_sha1_enc_avx
___
$code.=<<___;
	jmp	aesni_cbc_sha1_enc_ssse3
	ret
.size	aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
___
# Register assignment for the SSSE3 path.  First seven are the SysV
# argument registers carrying the C arguments listed above.
my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");

my $Xi=4;				# index into the 16-word message schedule
my @X=map("%xmm$_",(4..7,0..3));	# sliding window over schedule words
my @Tx=map("%xmm$_",(8..10));		# scratch / K_XX_XX constants
my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
my @T=("%esi","%edi");			# rotating temporaries for F()
my $j=0; my $jj=0; my $r=0; my $sn=0;	# round / aesenc / label counters
my $K_XX_XX="%r11";			# pointer to round-constant table
my ($iv,$in,$rndkey0)=map("%xmm$_",(11..13));	# CBC state and key material
my @rndkey=("%xmm14","%xmm15");		# two-deep round-key pipeline

# Any undefined &mnemonic(...) call lands here and is appended to $code as
# "mnemonic dst,src,..." with the argument order reversed (AT&T order) and
# a bare number turned into an immediate.
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
  $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

# Rotate helpers; the AVX path rebinds these to shld/shrd.
my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };
# SSSE3 subroutine prologue: save callee-saved GPRs (and, on Win64, the
# non-volatile xmm6-15), load the 7th stack argument, then load the first
# 64-byte block, byte-swap it and pre-add K_00_19 into the stack xfer area.
$code.=<<___;
.type	aesni_cbc_sha1_enc_ssse3,\@function,6
.align	16
aesni_cbc_sha1_enc_ssse3:
	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
	#shr	\$6,$len			# debugging artefact
	#jz	.Lepilogue_ssse3		# debugging artefact
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
	#mov	$in0,$inp			# debugging artefact
	#lea	64(%rsp),$ctx			# debugging artefact
___
$code.=<<___ if ($win64);
	movaps	%xmm6,96+0(%rsp)
	movaps	%xmm7,96+16(%rsp)
	movaps	%xmm8,96+32(%rsp)
	movaps	%xmm9,96+48(%rsp)
	movaps	%xmm10,96+64(%rsp)
	movaps	%xmm11,96+80(%rsp)
	movaps	%xmm12,96+96(%rsp)
	movaps	%xmm13,96+112(%rsp)
	movaps	%xmm14,96+128(%rsp)
	movaps	%xmm15,96+144(%rsp)
.Lprologue_ssse3:
___
$code.=<<___;
	mov	$in0,%r12			# reassign arguments
	mov	$out,%r13
	mov	$len,%r14
	mov	$key,%r15
	movdqu	($ivp),$iv			# load IV
	mov	$ivp,88(%rsp)			# save $ivp
___
# From here on the four main arguments live in callee-saved registers and
# $ivp's register is recycled to hold the AES round count.
my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
my $rounds="${ivp}d";
$code.=<<___;
	shl	\$6,$len
	sub	$in0,$out
	mov	240($key),$rounds
	add	$inp,$len		# end of input

	lea	K_XX_XX(%rip),$K_XX_XX
	mov	0($ctx),$A		# load context
	mov	4($ctx),$B
	mov	8($ctx),$C
	mov	12($ctx),$D
	mov	$B,@T[0]		# magic seed
	mov	16($ctx),$E

	movdqa	64($K_XX_XX),@X[2]	# pbswap mask
	movdqa	0($K_XX_XX),@Tx[1]	# K_00_19
	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
	movdqu	16($inp),@X[-3&7]
	movdqu	32($inp),@X[-2&7]
	movdqu	48($inp),@X[-1&7]
	pshufb	@X[2],@X[-4&7]		# byte swap
	add	\$64,$inp
	pshufb	@X[2],@X[-3&7]
	pshufb	@X[2],@X[-2&7]
	pshufb	@X[2],@X[-1&7]
	paddd	@Tx[1],@X[-4&7]		# add K_00_19
	paddd	@Tx[1],@X[-3&7]
	paddd	@Tx[1],@X[-2&7]
	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
	psubd	@Tx[1],@X[-4&7]		# restore X[]
	movdqa	@X[-3&7],16(%rsp)
	psubd	@Tx[1],@X[-3&7]
	movdqa	@X[-2&7],32(%rsp)
	psubd	@Tx[1],@X[-2&7]
	movups	($key),$rndkey0		# $key[0]
	movups	16($key),$rndkey[0]	# forward reference

	jmp	.Loop_ssse3
___
# Emits one step of AES-128/192/256-CBC each time a SHA-1 round body
# invokes it.  $r counts emitted steps: step $r works on 16-byte block
# $r/10 at key-schedule slot $r%10.  Slot 0 loads/xors the next plaintext
# block and writes the previous ciphertext; slot 9 handles the variable
# tail (10/12/14 rounds depending on $rounds) and aesenclast.
my $aesenc=sub {
  use integer;
  my ($n,$k)=($r/10,$r%10);
  if ($k==0) {
    $code.=<<___;
	movups		`16*$n`($in0),$in		# load input
	xorps		$rndkey0,$in
___
    $code.=<<___ if ($n);
	movups		$iv,`16*($n-1)`($out,$in0)	# write output
___
    $code.=<<___;
	xorps		$in,$iv
	aesenc		$rndkey[0],$iv
	movups		`32+16*$k`($key),$rndkey[1]
___
  } elsif ($k==9) {
    $sn++;			# unique label per block
    $code.=<<___;
	cmp		\$11,$rounds
	jb		.Laesenclast$sn
	movups		`32+16*($k+0)`($key),$rndkey[1]
	aesenc		$rndkey[0],$iv
	movups		`32+16*($k+1)`($key),$rndkey[0]
	aesenc		$rndkey[1],$iv
	je		.Laesenclast$sn
	movups		`32+16*($k+2)`($key),$rndkey[1]
	aesenc		$rndkey[0],$iv
	movups		`32+16*($k+3)`($key),$rndkey[0]
	aesenc		$rndkey[1],$iv
.Laesenclast$sn:
	aesenclast	$rndkey[0],$iv
	movups		16($key),$rndkey[1]		# forward reference
___
  } else {
    $code.=<<___;
	aesenc		$rndkey[0],$iv
	movups		`32+16*$k`($key),$rndkey[1]
___
  }
  # Advance step counter and rotate the two-deep round-key pipeline.
  $r++;	unshift(@rndkey,pop(@rndkey));
};
# Message-schedule update for words 16..31, interleaved with four SHA-1
# round bodies (the eval'ed @insns snippets).  Computes
# X[i] = rol(X[i-3] ^ X[i-8] ^ X[i-14] ^ X[i-16], 1) four words at a time
# and stores X[]+K in the stack xfer area for the integer rounds.
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	&movdqa	(@X[0],@X[-3&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(@Tx[0],@X[-1&7]);
	&palignr(@X[0],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &paddd	(@Tx[1],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(@Tx[2],@X[0]);
	&movdqa	(@Tx[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@Tx[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(@Tx[1],@Tx[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@Tx[2],30);
	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslld	(@Tx[1],2);
	&pxor	(@X[0],@Tx[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &movdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2

	 foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
		push(@Tx,shift(@Tx));
}
# Message-schedule update for words 32..79, interleaved with four round
# bodies.  Uses the rot-2 form X[i] = rol(X[i-6] ^ X[i-16] ^ X[i-28] ^
# X[i-32], 2) that operates on four words at once.
sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
  my ($a,$b,$c,$d,$e);

	&movdqa	(@Tx[0],@X[-1&7])	if ($Xi==8);
	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&palignr(@Tx[0],@X[-2&7],8);	# compose "X[-6]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	 eval(shift(@insns));
	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
	if ($Xi%5) {
	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
	}
	  &paddd	(@Tx[1],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&movdqa	(@Tx[0],@X[0]);
	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&pslld	(@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&psrld	(@Tx[0],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	  &movdqa	(@Tx[1],@X[0])	if ($Xi<19);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));

	 foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
		push(@Tx,shift(@Tx));
}
# Final schedule step of the 80-round block: stores the last X[]+K words,
# then either jumps to .Ldone_ssse3 (input exhausted) or preloads and
# byte-swaps the next 64-byte block for the next loop iteration.
sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	  &paddd	(@Tx[1],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU

	 foreach (@insns) { eval; }		# remaining instructions

	&cmp	($inp,$len);
	&je	(".Ldone_ssse3");

	unshift(@Tx,pop(@Tx));

	&movdqa	(@X[2],"64($K_XX_XX)");		# pbswap mask
	&movdqa	(@Tx[1],"0($K_XX_XX)");		# K_00_19
	&movdqu	(@X[-4&7],"0($inp)");		# load input
	&movdqu	(@X[-3&7],"16($inp)");
	&movdqu	(@X[-2&7],"32($inp)");
	&movdqu	(@X[-1&7],"48($inp)");
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&add	($inp,64);

  $Xi=0;
}
# One of three steps that finish byte-swapping the freshly loaded block
# and park X[]+K on the stack while the last rounds of the previous block
# still execute.
sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@Tx[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@Tx[1]);

	foreach (@insns) { eval; }
  $Xi++;
}
  408. sub Xtail_ssse3()
  409. { use integer;
  410. my $body = shift;
  411. my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
  412. my ($a,$b,$c,$d,$e);
  413. foreach (@insns) { eval; }
  414. }
# Returns the snippet list for one round of the Ch group (rounds 0..19):
# F(b,c,d) = (b & (c ^ d)) ^ d.  An '&$aesenc()' call is appended to the
# appropriate snippet so that 12 AES steps are spread over these 20 rounds.
sub body_00_19 () {
	use integer;
	my ($k,$n);
	my @r=(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,eval(4*($j&15))."(%rsp)");',	# X[]+K xfer
	'&xor	($c,$d);',
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&and	(@T[0],$c);',	# ($b&($c^$d))
	'&xor	($c,$d);',	# restore $c
	'&xor	(@T[0],$d);',
	'&add	($e,$a);',
	'&$_ror	($b,$j?7:2);',	# $b>>>2
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
	$n = scalar(@r);
	$k = (($jj+1)*12/20)*20*$n/12; # 12 aesencs per these 20 rounds
	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
	$jj++;
	return @r;
}
# Returns the snippet list for one round of the Parity groups (rounds
# 20..39 and 60..79): F(b,c,d) = b ^ c ^ d.  8 AES steps are spread over
# each 20-round group.
sub body_20_39 () {
	use integer;
	my ($k,$n);
	my @r=(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
	'&xor	(@T[0],$d);',	# ($b^$d)
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&xor	(@T[0],$c);',	# ($b^$d^$c)
	'&add	($e,$a);',
	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,@T[0]);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
	$n = scalar(@r);
	$k = (($jj+1)*8/20)*20*$n/8;	# 8 aesencs per these 20 rounds
	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
	$jj++;
	return @r;
}
# Returns the snippet list for one round of the Maj group (rounds 40..59):
# F(b,c,d) = (b & c) | (b & d) | (c & d), computed here as
# (c & d) + (b & (c ^ d)) via the two temporaries.  12 AES steps are
# spread over these 20 rounds.
sub body_40_59 () {
	use integer;
	my ($k,$n);
	my @r=(
	'($a,$b,$c,$d,$e)=@V;'.
	'&mov	(@T[1],$c);',
	'&xor	($c,$d);',
	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
	'&and	(@T[1],$d);',
	'&and	(@T[0],$c);',	# ($b&($c^$d))
	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,@T[1]);',
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	($c,$d);',	# restore $c
	'&add	($e,$a);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
	$n = scalar(@r);
	$k=(($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
	$jj++;
	return @r;
}
# Compose the 80 SHA-1 rounds of the SSSE3 path: 16 schedule-updating
# groups, the last-schedule group, then (on the fall-through path) three
# Xloop groups that overlap with loading the next block.  State saved in
# $saved_* lets the "done" tail re-generate the same final rounds.
$code.=<<___;
.align	16
.Loop_ssse3:
___
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	# Snapshot generator state so the .Ldone_ssse3 tail below can emit
	# the same closing rounds without the Xloop preloading.
	$saved_j=$j; @saved_V=@V;
	$saved_r=$r; @saved_rndkey=@rndkey;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

$code.=<<___;
	movups	$iv,48($out,$in0)		# write output
	lea	64($in0),$in0

	add	0($ctx),$A			# update context
	add	4($ctx),@T[0]
	add	8($ctx),$C
	add	12($ctx),$D
	mov	$A,0($ctx)
	add	16($ctx),$E
	mov	@T[0],4($ctx)
	mov	@T[0],$B			# magic seed
	mov	$C,8($ctx)
	mov	$D,12($ctx)
	mov	$E,16($ctx)

	jmp	.Loop_ssse3

.align	16
.Ldone_ssse3:
___
				$jj=$j=$saved_j; @V=@saved_V;
				$r=$saved_r;	 @rndkey=@saved_rndkey;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

$code.=<<___;
	movups	$iv,48($out,$in0)		# write output
	mov	88(%rsp),$ivp			# restore $ivp

	add	0($ctx),$A			# update context
	add	4($ctx),@T[0]
	add	8($ctx),$C
	mov	$A,0($ctx)
	add	12($ctx),$D
	mov	@T[0],4($ctx)
	add	16($ctx),$E
	mov	$C,8($ctx)
	mov	$D,12($ctx)
	mov	$E,16($ctx)
	movups	$iv,($ivp)			# write IV
___
$code.=<<___ if ($win64);
	movaps	96+0(%rsp),%xmm6
	movaps	96+16(%rsp),%xmm7
	movaps	96+32(%rsp),%xmm8
	movaps	96+48(%rsp),%xmm9
	movaps	96+64(%rsp),%xmm10
	movaps	96+80(%rsp),%xmm11
	movaps	96+96(%rsp),%xmm12
	movaps	96+112(%rsp),%xmm13
	movaps	96+128(%rsp),%xmm14
	movaps	96+144(%rsp),%xmm15
___
$code.=<<___;
	lea	`104+($win64?10*16:0)`(%rsp),%rsi
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lepilogue_ssse3:
	ret
.size	aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
___

# Reset generator counters before emitting the AVX flavour.
$j=$jj=$r=$sn=0;
# AVX flavour of the same stitch, emitted only when the assembler can
# encode AVX.  Mirrors the SSSE3 prologue; note the extra "add \$112,$key"
# so round-key offsets fit in one-byte displacements, and vzeroall to
# avoid false dependencies on upper ymm state.
if ($avx) {
my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");

my $Xi=4;
my @X=map("%xmm$_",(4..7,0..3));
my @Tx=map("%xmm$_",(8..10));
my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
my @T=("%esi","%edi");

# Two-operand shld/shrd keep the rotate result in a fresh register.
my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };

$code.=<<___;
.type	aesni_cbc_sha1_enc_avx,\@function,6
.align	16
aesni_cbc_sha1_enc_avx:
	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
	#shr	\$6,$len			# debugging artefact
	#jz	.Lepilogue_avx			# debugging artefact
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
	#mov	$in0,$inp			# debugging artefact
	#lea	64(%rsp),$ctx			# debugging artefact
___
$code.=<<___ if ($win64);
	movaps	%xmm6,96+0(%rsp)
	movaps	%xmm7,96+16(%rsp)
	movaps	%xmm8,96+32(%rsp)
	movaps	%xmm9,96+48(%rsp)
	movaps	%xmm10,96+64(%rsp)
	movaps	%xmm11,96+80(%rsp)
	movaps	%xmm12,96+96(%rsp)
	movaps	%xmm13,96+112(%rsp)
	movaps	%xmm14,96+128(%rsp)
	movaps	%xmm15,96+144(%rsp)
.Lprologue_avx:
___
$code.=<<___;
	vzeroall
	mov	$in0,%r12			# reassign arguments
	mov	$out,%r13
	mov	$len,%r14
	mov	$key,%r15
	vmovdqu	($ivp),$iv			# load IV
	mov	$ivp,88(%rsp)			# save $ivp
___
my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
my $rounds="${ivp}d";
$code.=<<___;
	shl	\$6,$len
	sub	$in0,$out
	mov	240($key),$rounds
	add	\$112,$key		# size optimization
	add	$inp,$len		# end of input

	lea	K_XX_XX(%rip),$K_XX_XX
	mov	0($ctx),$A		# load context
	mov	4($ctx),$B
	mov	8($ctx),$C
	mov	12($ctx),$D
	mov	$B,@T[0]		# magic seed
	mov	16($ctx),$E

	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
	vmovdqa	0($K_XX_XX),@Tx[1]	# K_00_19
	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
	vmovdqu	16($inp),@X[-3&7]
	vmovdqu	32($inp),@X[-2&7]
	vmovdqu	48($inp),@X[-1&7]
	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
	add	\$64,$inp
	vpshufb	@X[2],@X[-3&7],@X[-3&7]
	vpshufb	@X[2],@X[-2&7],@X[-2&7]
	vpshufb	@X[2],@X[-1&7],@X[-1&7]
	vpaddd	@Tx[1],@X[-4&7],@X[0]	# add K_00_19
	vpaddd	@Tx[1],@X[-3&7],@X[1]
	vpaddd	@Tx[1],@X[-2&7],@X[2]
	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
	vmovdqa	@X[1],16(%rsp)
	vmovdqa	@X[2],32(%rsp)
	vmovups	-112($key),$rndkey0	# $key[0]
	vmovups	16-112($key),$rndkey[0]	# forward reference

	jmp	.Loop_avx
___
# AVX counterpart of $aesenc: same $r/10, $r%10 scheduling, but
# non-destructive three-operand forms and key offsets biased by the -112
# applied to $key in the prologue.
my $aesenc=sub {
  use integer;
  my ($n,$k)=($r/10,$r%10);
  if ($k==0) {
    $code.=<<___;
	vmovups		`16*$n`($in0),$in		# load input
	vxorps		$rndkey0,$in,$in
___
    $code.=<<___ if ($n);
	vmovups		$iv,`16*($n-1)`($out,$in0)	# write output
___
    $code.=<<___;
	vxorps		$in,$iv,$iv
	vaesenc		$rndkey[0],$iv,$iv
	vmovups		`32+16*$k-112`($key),$rndkey[1]
___
  } elsif ($k==9) {
    $sn++;			# unique label per block
    $code.=<<___;
	cmp		\$11,$rounds
	jb		.Lvaesenclast$sn
	vaesenc		$rndkey[0],$iv,$iv
	vmovups		`32+16*($k+0)-112`($key),$rndkey[1]
	vaesenc		$rndkey[1],$iv,$iv
	vmovups		`32+16*($k+1)-112`($key),$rndkey[0]
	je		.Lvaesenclast$sn
	vaesenc		$rndkey[0],$iv,$iv
	vmovups		`32+16*($k+2)-112`($key),$rndkey[1]
	vaesenc		$rndkey[1],$iv,$iv
	vmovups		`32+16*($k+3)-112`($key),$rndkey[0]
.Lvaesenclast$sn:
	vaesenclast	$rndkey[0],$iv,$iv
	vmovups		16-112($key),$rndkey[1]		# forward reference
___
  } else {
    $code.=<<___;
	vaesenc		$rndkey[0],$iv,$iv
	vmovups		`32+16*$k-112`($key),$rndkey[1]
___
  }
  # Advance step counter and rotate the two-deep round-key pipeline.
  $r++;	unshift(@rndkey,pop(@rndkey));
};
# AVX message-schedule update for words 16..31; same dataflow as the
# SSSE3 version but three-operand forms save the extra register copies.
sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@Tx[0],@X[-1&7],4);	# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@Tx[0]);	# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@Tx[0],@X[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslldq(@Tx[2],@X[0],12);	# "X[0]"<<96, extract one dword
	&vpaddd	(@X[0],@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@Tx[1],@Tx[2],30);
	&vpor	(@X[0],@X[0],@Tx[0]);	# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslld	(@Tx[2],@Tx[2],2);
	&vpxor	(@X[0],@X[0],@Tx[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@Tx[2]);	# "X[0]"^=("X[0]">>96)<<<2
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vmovdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	 foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
		push(@Tx,shift(@Tx));
}
# AVX message-schedule update for words 32..79 (rot-2 four-at-a-time
# form), interleaved with four round bodies.
sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	 eval(shift(@insns));
	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
	if ($Xi%5) {
	  &vmovdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &vmovdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
	}
	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@Tx[0],@X[0],30);
	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	  &vmovdqa	(@Tx[1],@X[0])	if ($Xi<19);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));

	 foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
		push(@Tx,shift(@Tx));
}
# AVX final schedule step of the 80-round block: store last X[]+K, then
# either jump to .Ldone_avx or preload/byte-swap the next 64-byte block.
sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  # NOTE(review): plain (SSE) movdqa in the AVX path; vmovdqa would
	  # avoid a potential SSE/AVX transition penalty — confirm upstream.
	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU

	 foreach (@insns) { eval; }		# remaining instructions

	&cmp	($inp,$len);
	&je	(".Ldone_avx");

	unshift(@Tx,pop(@Tx));

	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
	&vmovdqa(@Tx[1],"0($K_XX_XX)");		# K_00_19
	&vmovdqu(@X[-4&7],"0($inp)");		# load input
	&vmovdqu(@X[-3&7],"16($inp)");
	&vmovdqu(@X[-2&7],"32($inp)");
	&vmovdqu(@X[-1&7],"48($inp)");
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&add	($inp,64);

  $Xi=0;
}
# Emit rounds for the current block while preprocessing one 16-byte quarter
# of the freshly loaded next block: byte-swap the next quarter (the first
# one was already swapped in Xuplast_avx_80), add K_00_19 to the current
# quarter and park X[]+K on the stack for the integer round code.
sub Xloop_avx()
{ use integer;
  my $body = shift;			# generator for stitched round-body code
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);			# round registers, assigned by the eval'd snippets

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);	# byte-swap next quarter (@X[2] = pbswap mask)
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);	# + K_00_19 (loaded by Xuplast_avx_80)
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }		# remaining instructions
  $Xi++;
}
  863. sub Xtail_avx()
  864. { use integer;
  865. my $body = shift;
  866. my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
  867. my ($a,$b,$c,$d,$e);
  868. foreach (@insns) { eval; }
  869. }
# Body of the main stitched loop.  Each Xupdate_* call emits one vectorized
# message-schedule step with stitched AES/SHA-1 round code interleaved; the
# body generator switches at the SHA-1 stage boundaries (F/K change every
# 20 rounds).  Xuplast_avx_80 finishes the schedule and may branch out to
# .Ldone_avx once the input is exhausted.
$code.=<<___;
.align	16
.Loop_avx:
___
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	# Snapshot the code-generation state (SHA-1 variable rotation @V,
	# round counter $j, AES round-key interleave $r/@rndkey) so the
	# .Ldone_avx tail can re-emit the remaining rounds from this point.
	$saved_j=$j; @saved_V=@V;
	$saved_r=$r; @saved_rndkey=@rndkey;

	# Remaining rounds, overlapped with preprocessing of the next block.
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
# Tail of .Loop_avx: store this iteration's last ciphertext block, advance
# the input pointer and fold the working variables back into the SHA-1
# context before looping.
$code.=<<___;
	vmovups	$iv,48($out,$in0)		# write output
	lea	64($in0),$in0

	add	0($ctx),$A			# update context
	add	4($ctx),@T[0]
	add	8($ctx),$C
	add	12($ctx),$D
	mov	$A,0($ctx)
	add	16($ctx),$E
	mov	@T[0],4($ctx)
	mov	@T[0],$B			# magic seed
	mov	$C,8($ctx)
	mov	$D,12($ctx)
	mov	$E,16($ctx)
	jmp	.Loop_avx

.align	16
.Ldone_avx:
___
# .Ldone_avx path: restore the code-generation state saved before the
# Xloop_avx calls above and re-emit the same remaining rounds via Xtail_avx,
# which performs no next-block preprocessing.
$jj=$j=$saved_j; @V=@saved_V;
$r=$saved_r; @rndkey=@saved_rndkey;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

# Store the final ciphertext block, update the hash context, write back the
# chaining value and scrub the XMM register file.
$code.=<<___;
	vmovups	$iv,48($out,$in0)		# write output
	mov	88(%rsp),$ivp			# restore $ivp

	add	0($ctx),$A			# update context
	add	4($ctx),@T[0]
	add	8($ctx),$C
	mov	$A,0($ctx)
	add	12($ctx),$D
	mov	@T[0],4($ctx)
	add	16($ctx),$E
	mov	$C,8($ctx)
	mov	$D,12($ctx)
	mov	$E,16($ctx)
	vmovups	$iv,($ivp)			# write IV
	vzeroall
___
# Win64 only: reload the callee-saved XMM registers spilled by the prologue.
$code.=<<___ if ($win64);
	movaps	96+0(%rsp),%xmm6
	movaps	96+16(%rsp),%xmm7
	movaps	96+32(%rsp),%xmm8
	movaps	96+48(%rsp),%xmm9
	movaps	96+64(%rsp),%xmm10
	movaps	96+80(%rsp),%xmm11
	movaps	96+96(%rsp),%xmm12
	movaps	96+112(%rsp),%xmm13
	movaps	96+128(%rsp),%xmm14
	movaps	96+144(%rsp),%xmm15
___
# Epilogue: restore the callee-saved GPRs and return.
$code.=<<___;
	lea	`104+($win64?10*16:0)`(%rsp),%rsi
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lepilogue_avx:
	ret
.size	aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
___
  960. }
# Constant pool shared by the SSSE3 and AVX code paths: the four SHA-1 round
# constants broadcast across all four 32-bit lanes, followed by the byte
# shuffle mask used with pshufb/vpshufb for the big-endian byte swap.
$code.=<<___;
.align	64
K_XX_XX:
.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
.asciz	"AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___
  972. # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
  973. # CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
# Win64 structured-exception-handling support: emit an unwind handler that
# restores the non-volatile registers saved by the SSSE3/AVX prologues, plus
# the .pdata/.xdata records that register it with the OS unwinder.
$rec="%rcx";		# EXCEPTION_RECORD *		(Win64 arg1)
$frame="%rdx";		# ULONG64 establisher frame	(Win64 arg2)
$context="%r8";		# CONTEXT *			(Win64 arg3)
$disp="%r9";		# DISPATCHER_CONTEXT *		(Win64 arg4)

# Handler body: if the fault hit between the prologue and epilogue labels
# (HandlerData[0]/[1]), recover the saved XMM and GPR values from the frame
# into *context, then hand off to RtlVirtualUnwind to continue the search.
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	96(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq
	lea	`104+10*16`(%rax),%rax	# adjust stack pointer

	mov	0(%rax),%r15
	mov	8(%rax),%r14
	mov	16(%rax),%r13
	mov	24(%rax),%r12
	mov	32(%rax),%rbp
	mov	40(%rax),%rbx
	lea	48(%rax),%rax
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	ssse3_handler,.-ssse3_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_aesni_cbc_sha1_enc_ssse3
	.rva	.LSEH_end_aesni_cbc_sha1_enc_ssse3
	.rva	.LSEH_info_aesni_cbc_sha1_enc_ssse3
___
# The AVX entry point gets its own .pdata record only when AVX code was
# actually emitted.
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_aesni_cbc_sha1_enc_avx
	.rva	.LSEH_end_aesni_cbc_sha1_enc_avx
	.rva	.LSEH_info_aesni_cbc_sha1_enc_avx
___
# Unwind info records; both entry points share ssse3_handler, with their
# prologue/epilogue labels passed as HandlerData[].
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_aesni_cbc_sha1_enc_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
___
$code.=<<___ if ($avx);
.LSEH_info_aesni_cbc_sha1_enc_avx:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
___
}
  1087. ####################################################################
  1088. sub rex {
  1089. local *opcode=shift;
  1090. my ($dst,$src)=@_;
  1091. my $rex=0;
  1092. $rex|=0x04 if($dst>=8);
  1093. $rex|=0x01 if($src>=8);
  1094. push @opcode,$rex|0x40 if($rex);
  1095. }
  1096. sub aesni {
  1097. my $line=shift;
  1098. my @opcode=(0x66);
  1099. if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
  1100. my %opcodelet = (
  1101. "aesenc" => 0xdc, "aesenclast" => 0xdd
  1102. );
  1103. return undef if (!defined($opcodelet{$1}));
  1104. rex(\@opcode,$3,$2);
  1105. push @opcode,0x0f,0x38,$opcodelet{$1};
  1106. push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
  1107. return ".byte\t".join(',',@opcode);
  1108. }
  1109. return $line;
  1110. }
  1111. $code =~ s/\`([^\`]*)\`/eval($1)/gem;
  1112. $code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
  1113. print $code;
  1114. close STDOUT;