#! /usr/bin/env perl
# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# sha1_block procedure for x86_64.
#
# It was brought to my attention that on EM64T compiler-generated code
# was far behind the 32-bit assembler implementation. This is unlike
# Opteron, where compiler-generated code was only 15% behind 32-bit
# assembler, which originally made it hard to motivate the effort.
# There was a suggestion to mechanically translate the 32-bit code,
# but I dismissed it, reasoning that x86_64 offers enough register
# bank capacity to fully utilize SHA-1 parallelism. Hence this fresh
# implementation:-) However! While 64-bit code does perform better
# on Opteron, I failed to beat 32-bit assembler on EM64T core. Well,
# x86_64 does offer a larger *addressable* bank, but the out-of-order
# core reaches for even more registers through dynamic aliasing, and
# the EM64T core must have managed to run-time optimize even 32-bit
# code just as well as the 64-bit one. Performance improvement is
# summarized in the following table:
#
#		gcc 3.4		32-bit asm	cycles/byte
# Opteron	+45%		+20%		6.8
# Xeon P4	+65%		+0%		9.9
# Core2		+60%		+10%		7.0
#
# August 2009.
#
# The code was revised to minimize code size and to maximize
# "distance" between instructions producing input to 'lea'
# instruction and the 'lea' instruction itself, which is essential
# for Intel Atom core.
#
# October 2010.
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in OpenSSL source, to the SIMD unit. See
# the sha1-586.pl module for background and implementation details.
# The only difference from 32-bit code is that 64-bit code doesn't
# have to spill @X[] elements to free temporary registers.
#
# April 2011.
#
# Add AVX code path. See sha1-586.pl for further information.
#
# May 2013.
#
# Add AVX2+BMI code path. The initial attempt (utilizing BMI
# instructions and loading a pair of consecutive blocks into 256-bit
# %ymm registers) did not provide an impressive performance
# improvement until a crucial hint regarding the number of Xupdate
# iterations to pre-compute in advance was provided by Ilya Albrekht
# of Intel Corp.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is
# better).
#
#		x86_64		SSSE3		AVX[2]
# P4		9.05		-
# Opteron	6.26		-
# Core2		6.55		6.05/+8%	-
# Westmere	6.73		5.30/+27%	-
# Sandy Bridge	7.70		6.10/+26%	4.99/+54%
# Ivy Bridge	6.06		4.67/+30%	4.60/+32%
# Haswell	5.45		4.15/+31%	3.57/+53%
# Skylake	5.18		4.06/+28%	3.54/+46%
# Bulldozer	9.11		5.95/+53%
# Ryzen		4.75		?		1.93/+150%(**)
# VIA Nano	9.32		7.15/+30%
# Atom		10.3		9.17/+12%
# Silvermont	13.1(*)		9.37/+40%
# Goldmont	8.13		6.42/+27%	1.70/+380%(**)
#
# (*)	obviously suboptimal result, nothing was done about it,
#	because SSSE3 code is compiled unconditionally;
# (**)	SHAEXT result
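
######################################################################
# Build-time usage (a hedged note, inferred from the x86_64-xlate.pl
# pipe set up below rather than stated anywhere in this file): the
# script takes an output "flavour" plus a file name and filters its
# generated code through the xlate translator, e.g.
#
#	perl sha1-x86_64.pl elf sha1-x86_64.s
#	perl sha1-x86_64.pl nasm sha1-x86_64.asm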
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	`ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([2-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

$shaext=1;	### set to zero if compiling for 1.0.1

$avx=1 if (!$shaext && $avx);

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
$ctx="%rdi";	# 1st arg
$inp="%rsi";	# 2nd arg
$num="%rdx";	# 3rd arg

# reassign arguments in order to produce more compact code
$ctx="%r8";
$inp="%r9";
$num="%r10";

$t0="%eax";
$t1="%ebx";
$t2="%ecx";
@xi=("%edx","%ebp","%r14d");
$A="%esi";
$B="%edi";
$C="%r11d";
$D="%r12d";
$E="%r13d";
@V=($A,$B,$C,$D,$E);
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if ($i==0);
	mov `4*$i`($inp),$xi[0]
	bswap $xi[0]
___
$code.=<<___ if ($i<15);
	mov `4*$j`($inp),$xi[1]
	mov $d,$t0
	mov $xi[0],`4*$i`(%rsp)
	mov $a,$t2
	bswap $xi[1]
	xor $c,$t0
	rol \$5,$t2
	and $b,$t0
	lea 0x5a827999($xi[0],$e),$e
	add $t2,$e
	xor $d,$t0
	rol \$30,$b
	add $t0,$e
___
$code.=<<___ if ($i>=15);
	xor `4*($j%16)`(%rsp),$xi[1]
	mov $d,$t0
	mov $xi[0],`4*($i%16)`(%rsp)
	mov $a,$t2
	xor `4*(($j+2)%16)`(%rsp),$xi[1]
	xor $c,$t0
	rol \$5,$t2
	xor `4*(($j+8)%16)`(%rsp),$xi[1]
	and $b,$t0
	lea 0x5a827999($xi[0],$e),$e
	rol \$30,$b
	xor $d,$t0
	add $t2,$e
	rol \$1,$xi[1]
	add $t0,$e
___
push(@xi,shift(@xi));
}
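
# Documentation-only sketch (the helper below is illustrative and is
# never called by the code generator): rounds 0-19 use the round
# function Ch(b,c,d) = (b&c)|(~b&d), which the xor/and/xor chain on
# $t0 above computes branchlessly with a single temporary:
sub _ref_Ch { my ($b,$c,$d)=@_; return $d ^ ($b & ($c ^ $d)); }	# hypothetical helper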
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
my $K=($i<40)?0x6ed9eba1:0xca62c1d6;
$code.=<<___ if ($i<79);
	xor `4*($j%16)`(%rsp),$xi[1]
	mov $b,$t0
	`"mov $xi[0],".4*($i%16)."(%rsp)" if ($i<72)`
	mov $a,$t2
	xor `4*(($j+2)%16)`(%rsp),$xi[1]
	xor $d,$t0
	rol \$5,$t2
	xor `4*(($j+8)%16)`(%rsp),$xi[1]
	lea $K($xi[0],$e),$e
	xor $c,$t0
	add $t2,$e
	rol \$30,$b
	add $t0,$e
	rol \$1,$xi[1]
___
$code.=<<___ if ($i==79);
	mov $b,$t0
	mov $a,$t2
	xor $d,$t0
	lea $K($xi[0],$e),$e
	rol \$5,$t2
	xor $c,$t0
	add $t2,$e
	rol \$30,$b
	add $t0,$e
___
push(@xi,shift(@xi));
}
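
# Documentation-only sketch (illustrative helper, never called):
# rounds 20-39 and 60-79 use the parity function b^c^d, evaluated
# above as (b^d)^c; $K selects the matching round constant,
# 0x6ed9eba1 or 0xca62c1d6.
sub _ref_Parity { my ($b,$c,$d)=@_; return $b ^ $c ^ $d; }	# hypothetical helper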
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___;
	xor `4*($j%16)`(%rsp),$xi[1]
	mov $d,$t0
	mov $xi[0],`4*($i%16)`(%rsp)
	mov $d,$t1
	xor `4*(($j+2)%16)`(%rsp),$xi[1]
	and $c,$t0
	mov $a,$t2
	xor `4*(($j+8)%16)`(%rsp),$xi[1]
	lea 0x8f1bbcdc($xi[0],$e),$e
	xor $c,$t1
	rol \$5,$t2
	add $t0,$e
	rol \$1,$xi[1]
	and $b,$t1
	add $t2,$e
	rol \$30,$b
	add $t1,$e
___
push(@xi,shift(@xi));
}
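
# Documentation-only sketch (illustrative helper, never called):
# rounds 40-59 use Maj(b,c,d) = (b&c)|(b&d)|(c&d). BODY_40_59 rewrites
# it as (c&d) + (b&(c^d)); the two terms are bitwise disjoint, so
# adding $t0 and $t1 into $e separately is equivalent to adding Maj
# once.
sub _ref_Maj { my ($b,$c,$d)=@_; return ($c & $d) + ($b & ($c ^ $d)); }	# hypothetical helper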
$code.=<<___;
.text
.extern OPENSSL_ia32cap_P

.globl sha1_block_data_order
.type sha1_block_data_order,\@function,3
.align 16
sha1_block_data_order:
.cfi_startproc
	mov OPENSSL_ia32cap_P+0(%rip),%r9d
	mov OPENSSL_ia32cap_P+4(%rip),%r8d
	mov OPENSSL_ia32cap_P+8(%rip),%r10d
	test \$`1<<9`,%r8d		# check SSSE3 bit
	jz .Lialu
___
$code.=<<___ if ($shaext);
	test \$`1<<29`,%r10d		# check SHA bit
	jnz _shaext_shortcut
___
$code.=<<___ if ($avx>1);
	and \$`1<<3|1<<5|1<<8`,%r10d	# check AVX2+BMI1+BMI2
	cmp \$`1<<3|1<<5|1<<8`,%r10d
	je _avx2_shortcut
___
$code.=<<___ if ($avx);
	and \$`1<<28`,%r8d		# mask AVX bit
	and \$`1<<30`,%r9d		# mask "Intel CPU" bit
	or %r9d,%r8d
	cmp \$`1<<28|1<<30`,%r8d
	je _avx_shortcut
___
$code.=<<___;
	jmp _ssse3_shortcut

.align 16
.Lialu:
	mov %rsp,%rax
.cfi_def_cfa_register %rax
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13
.cfi_push %r13
	push %r14
.cfi_push %r14
	mov %rdi,$ctx		# reassigned argument
	sub \$`8+16*4`,%rsp
	mov %rsi,$inp		# reassigned argument
	and \$-64,%rsp
	mov %rdx,$num		# reassigned argument
	mov %rax,`16*4`(%rsp)
.cfi_cfa_expression %rsp+64,deref,+8
.Lprologue:

	mov 0($ctx),$A
	mov 4($ctx),$B
	mov 8($ctx),$C
	mov 12($ctx),$D
	mov 16($ctx),$E
	jmp .Lloop

.align 16
.Lloop:
___
for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	add 0($ctx),$A
	add 4($ctx),$B
	add 8($ctx),$C
	add 12($ctx),$D
	add 16($ctx),$E
	mov $A,0($ctx)
	mov $B,4($ctx)
	mov $C,8($ctx)
	mov $D,12($ctx)
	mov $E,16($ctx)

	sub \$1,$num
	lea `16*4`($inp),$inp
	jnz .Lloop

	mov `16*4`(%rsp),%rsi
.cfi_def_cfa %rsi,8
	mov -40(%rsi),%r14
.cfi_restore %r14
	mov -32(%rsi),%r13
.cfi_restore %r13
	mov -24(%rsi),%r12
.cfi_restore %r12
	mov -16(%rsi),%rbp
.cfi_restore %rbp
	mov -8(%rsi),%rbx
.cfi_restore %rbx
	lea (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
	ret
.cfi_endproc
.size sha1_block_data_order,.-sha1_block_data_order
___
if ($shaext) {{{
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("%rdi","%rsi","%rdx");
my ($ABCD,$E,$E_,$BSWAP,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(0..3,8,9));
my @MSG=map("%xmm$_",(4..7));

$code.=<<___;
.type sha1_block_data_order_shaext,\@function,3
.align 32
sha1_block_data_order_shaext:
_shaext_shortcut:
.cfi_startproc
___
$code.=<<___ if ($win64);
	lea `-8-4*16`(%rsp),%rsp
	movaps %xmm6,-8-4*16(%rax)
	movaps %xmm7,-8-3*16(%rax)
	movaps %xmm8,-8-2*16(%rax)
	movaps %xmm9,-8-1*16(%rax)
.Lprologue_shaext:
___
$code.=<<___;
	movdqu ($ctx),$ABCD
	movd 16($ctx),$E
	movdqa K_XX_XX+0xa0(%rip),$BSWAP	# byte-n-word swap
	movdqu ($inp),@MSG[0]
	pshufd \$0b00011011,$ABCD,$ABCD		# flip word order
	movdqu 0x10($inp),@MSG[1]
	pshufd \$0b00011011,$E,$E		# flip word order
	movdqu 0x20($inp),@MSG[2]
	pshufb $BSWAP,@MSG[0]
	movdqu 0x30($inp),@MSG[3]
	pshufb $BSWAP,@MSG[1]
	pshufb $BSWAP,@MSG[2]
	movdqa $E,$E_SAVE			# offload $E
	pshufb $BSWAP,@MSG[3]
	jmp .Loop_shaext

.align 16
.Loop_shaext:
	dec $num
	lea 0x40($inp),%r8		# next input block
	paddd @MSG[0],$E
	cmovne %r8,$inp
	movdqa $ABCD,$ABCD_SAVE		# offload $ABCD
___
for($i=0;$i<20-4;$i+=2) {
$code.=<<___;
	sha1msg1 @MSG[1],@MSG[0]
	movdqa $ABCD,$E_
	sha1rnds4 \$`int($i/5)`,$E,$ABCD	# 0-3...
	sha1nexte @MSG[1],$E_
	pxor @MSG[2],@MSG[0]
	sha1msg1 @MSG[2],@MSG[1]
	sha1msg2 @MSG[3],@MSG[0]

	movdqa $ABCD,$E
	sha1rnds4 \$`int(($i+1)/5)`,$E_,$ABCD
	sha1nexte @MSG[2],$E
	pxor @MSG[3],@MSG[1]
	sha1msg2 @MSG[0],@MSG[1]
___
	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
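
# A note on the SHAEXT loop above: sha1msg1/sha1msg2 implement the W[]
# message schedule, sha1rnds4 executes four rounds per instruction
# with its immediate selecting the round function for the 20-round
# group (0=Ch, 1=Parity, 2=Maj, 3=Parity), hence the `int($i/5)`
# stepping, and sha1nexte folds the rotated E value into the next
# group's input.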
$code.=<<___;
	movdqu ($inp),@MSG[0]
	movdqa $ABCD,$E_
	sha1rnds4 \$3,$E,$ABCD			# 64-67
	sha1nexte @MSG[1],$E_
	movdqu 0x10($inp),@MSG[1]
	pshufb $BSWAP,@MSG[0]

	movdqa $ABCD,$E
	sha1rnds4 \$3,$E_,$ABCD			# 68-71
	sha1nexte @MSG[2],$E
	movdqu 0x20($inp),@MSG[2]
	pshufb $BSWAP,@MSG[1]

	movdqa $ABCD,$E_
	sha1rnds4 \$3,$E,$ABCD			# 72-75
	sha1nexte @MSG[3],$E_
	movdqu 0x30($inp),@MSG[3]
	pshufb $BSWAP,@MSG[2]

	movdqa $ABCD,$E
	sha1rnds4 \$3,$E_,$ABCD			# 76-79
	sha1nexte $E_SAVE,$E
	pshufb $BSWAP,@MSG[3]
	paddd $ABCD_SAVE,$ABCD
	movdqa $E,$E_SAVE			# offload $E

	jnz .Loop_shaext

	pshufd \$0b00011011,$ABCD,$ABCD
	pshufd \$0b00011011,$E,$E
	movdqu $ABCD,($ctx)
	movd $E,16($ctx)
___
$code.=<<___ if ($win64);
	movaps -8-4*16(%rax),%xmm6
	movaps -8-3*16(%rax),%xmm7
	movaps -8-2*16(%rax),%xmm8
	movaps -8-1*16(%rax),%xmm9
	mov %rax,%rsp
.Lepilogue_shaext:
___
$code.=<<___;
	ret
.cfi_endproc
.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
___
}}}
{{{
my $Xi=4;
my @X=map("%xmm$_",(4..7,0..3));
my @Tx=map("%xmm$_",(8..10));
my $Kx="%xmm11";
my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
my @T=("%esi","%edi");
my $j=0;
my $rx=0;
my $K_XX_XX="%r14";
my $fp="%r11";

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

{ my $sn;
sub align32() {
  ++$sn;
$code.=<<___;
	jmp .Lalign32_$sn	# see "Decoded ICache" in manual
.align 32
.Lalign32_$sn:
___
}
}
$code.=<<___;
.type sha1_block_data_order_ssse3,\@function,3
.align 16
sha1_block_data_order_ssse3:
_ssse3_shortcut:
.cfi_startproc
	mov %rsp,$fp		# frame pointer
.cfi_def_cfa_register $fp
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13		# redundant, done to share Win64 SE handler
.cfi_push %r13
	push %r14
.cfi_push %r14
	lea `-64-($win64?6*16:0)`(%rsp),%rsp
___
$code.=<<___ if ($win64);
	movaps %xmm6,-40-6*16($fp)
	movaps %xmm7,-40-5*16($fp)
	movaps %xmm8,-40-4*16($fp)
	movaps %xmm9,-40-3*16($fp)
	movaps %xmm10,-40-2*16($fp)
	movaps %xmm11,-40-1*16($fp)
.Lprologue_ssse3:
___
$code.=<<___;
	and \$-64,%rsp
	mov %rdi,$ctx		# reassigned argument
	mov %rsi,$inp		# reassigned argument
	mov %rdx,$num		# reassigned argument

	shl \$6,$num
	add $inp,$num
	lea K_XX_XX+64(%rip),$K_XX_XX

	mov 0($ctx),$A		# load context
	mov 4($ctx),$B
	mov 8($ctx),$C
	mov 12($ctx),$D
	mov $B,@T[0]		# magic seed
	mov 16($ctx),$E
	mov $C,@T[1]
	xor $D,@T[1]
	and @T[1],@T[0]

	movdqa 64($K_XX_XX),@X[2]	# pbswap mask
	movdqa -64($K_XX_XX),@Tx[1]	# K_00_19
	movdqu 0($inp),@X[-4&7]		# load input to %xmm[0-3]
	movdqu 16($inp),@X[-3&7]
	movdqu 32($inp),@X[-2&7]
	movdqu 48($inp),@X[-1&7]
	pshufb @X[2],@X[-4&7]		# byte swap
	pshufb @X[2],@X[-3&7]
	pshufb @X[2],@X[-2&7]
	add \$64,$inp
	paddd @Tx[1],@X[-4&7]		# add K_00_19
	pshufb @X[2],@X[-1&7]
	paddd @Tx[1],@X[-3&7]
	paddd @Tx[1],@X[-2&7]
	movdqa @X[-4&7],0(%rsp)		# X[]+K xfer to IALU
	psubd @Tx[1],@X[-4&7]		# restore X[]
	movdqa @X[-3&7],16(%rsp)
	psubd @Tx[1],@X[-3&7]
	movdqa @X[-2&7],32(%rsp)
	psubd @Tx[1],@X[-2&7]
	jmp .Loop_ssse3
___
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
  $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
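
# The AUTOLOAD thunk above is what lets the Xupdate_* subs below emit
# SIMD code in 32-bit perlasm style: any call to an undefined &opcode
# lands here and is appended to $code as one instruction, with the
# argument order reversed and a bare numeric last argument turned into
# an immediate. A hedged example of the translation:
#
#	&psrld (@Tx[0],31);	# emits "\tpsrld\t\$31,%xmm8\n"
#				# (assuming @Tx[0] is %xmm8 at that point)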
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));		# ror
	&pshufd (@X[0],@X[-4&7],0xee);	# was &movdqa (@X[0],@X[-3&7]);
	eval(shift(@insns));
	&movdqa (@Tx[0],@X[-1&7]);
	&paddd (@Tx[1],@X[-1&7]);
	eval(shift(@insns));
	eval(shift(@insns));

	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	&psrldq (@Tx[0],4);		# "X[-3]", 3 dwords
	eval(shift(@insns));
	eval(shift(@insns));

	&pxor (@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	&pxor (@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&pxor (@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	&movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));

	&movdqa (@Tx[2],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	&movdqa (@Tx[0],@X[0]);
	eval(shift(@insns));

	&pslldq (@Tx[2],12);		# "X[0]"<<96, extract one dword
	&paddd (@X[0],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));

	&psrld (@Tx[0],31);
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	&movdqa (@Tx[1],@Tx[2]);
	eval(shift(@insns));
	eval(shift(@insns));

	&psrld (@Tx[2],30);
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	&por (@X[0],@Tx[0]);		# "X[0]"<<<=1
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&pslld (@Tx[1],2);
	&pxor (@X[0],@Tx[2]);
	eval(shift(@insns));
	&movdqa (@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)");	# K_XX_XX
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));

	&pxor (@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
	&pshufd (@Tx[1],@X[-1&7],0xee) if ($Xi==7);	# was &movdqa (@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++; push(@X,shift(@X));	# "rotate" X[]
	 push(@Tx,shift(@Tx));
}
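
# Documentation-only reference for what each Xupdate_ssse3_16_31 call
# computes (a sketch; the real code evaluates four consecutive t's at
# once in XMM lanes, interleaved with scalar round instructions via
# the eval(shift(@insns)) calls):
#
#	W[t] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1)
#
sub _ref_Xupdate {			# hypothetical helper, never called
    my ($W,$t)=@_;			# $W: ref to array of 32-bit words
    my $x = $W->[$t-3] ^ $W->[$t-8] ^ $W->[$t-14] ^ $W->[$t-16];
    return (($x << 1) | ($x >> 31)) & 0xffffffff;
}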
sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns))		if ($Xi==8);
	&pxor (@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	eval(shift(@insns))		if ($Xi==8);
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns))		if (@insns[1] =~ /_ror/);
	eval(shift(@insns))		if (@insns[0] =~ /_ror/);
	&punpcklqdq(@Tx[0],@X[-1&7]);	# compose "X[-6]", was &palignr(@Tx[0],@X[-2&7],8);
	eval(shift(@insns));
	eval(shift(@insns));		# rol

	&pxor (@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	eval(shift(@insns));
	eval(shift(@insns));
	if ($Xi%5) {
	  &movdqa (@Tx[2],@Tx[1]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa (@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
	}
	eval(shift(@insns));		# ror
	&paddd (@Tx[1],@X[-1&7]);
	eval(shift(@insns));

	&pxor (@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns))		if (@insns[0] =~ /_ror/);

	&movdqa (@Tx[0],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	eval(shift(@insns));		# ror
	eval(shift(@insns));
	eval(shift(@insns));		# body_20_39

	&pslld (@X[0],2);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld (@Tx[0],30);
	eval(shift(@insns))		if (@insns[0] =~ /_rol/);# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror

	&por (@X[0],@Tx[0]);		# "X[0]"<<<=2
	eval(shift(@insns));
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	eval(shift(@insns))		if (@insns[0] =~ /_rol/);
	&pshufd(@Tx[1],@X[-1&7],0xee)	if ($Xi<19);	# was &movdqa (@Tx[1],@X[0])
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++; push(@X,shift(@X));	# "rotate" X[]
	 push(@Tx,shift(@Tx));
}
sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd (@Tx[1],@X[-1&7]);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&cmp ($inp,$num);
	&je (".Ldone_ssse3");

	unshift(@Tx,pop(@Tx));

	&movdqa (@X[2],"64($K_XX_XX)");		# pbswap mask
	&movdqa (@Tx[1],"-64($K_XX_XX)");	# K_00_19
	&movdqu (@X[-4&7],"0($inp)");		# load input
	&movdqu (@X[-3&7],"16($inp)");
	&movdqu (@X[-2&7],"32($inp)");
	&movdqu (@X[-1&7],"48($inp)");
	&pshufb (@X[-4&7],@X[2]);		# byte swap
	&add ($inp,64);

  $Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&pshufb (@X[($Xi-3)&7],@X[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd (@X[($Xi-4)&7],@Tx[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&psubd (@X[($Xi-4)&7],@Tx[1]);

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror ($b,$j?7:2)',			# $b>>>2
	'&xor (@T[0],$d)',
	'&mov (@T[1],$a)',			# $b for next round
	'&add ($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
	'&xor ($b,$c)',				# $c^$d for next round
	'&$_rol ($a,5)',
	'&add ($e,@T[0])',
	'&and (@T[1],$b)',			# ($b&($c^$d)) for next round
	'&xor ($b,$c)',				# restore $b
	'&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	# on entry @T[0]=b^d
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add ($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
	'&xor (@T[0],$d) if($j==19);'.
	'&xor (@T[0],$c) if($j> 19)',		# ($b^$d^$c)
	'&mov (@T[1],$a)',			# $b for next round
	'&$_rol ($a,5)',
	'&add ($e,@T[0])',
	'&xor (@T[1],$c) if ($j< 79)',		# $b^$d for next round
	'&$_ror ($b,7)',			# $b>>>2
	'&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	$rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add ($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
	'&and (@T[0],$c) if ($j>=40)',		# (b^c)&(c^d)
	'&xor ($c,$d) if ($j>=40)',		# restore $c
	'&$_ror ($b,7)',			# $b>>>2
	'&mov (@T[1],$a)',			# $b for next round
	'&xor (@T[0],$c)',
	'&$_rol ($a,5)',
	'&add ($e,@T[0])',
	'&xor (@T[1],$c) if ($j==59);'.
	'&xor (@T[1],$b) if ($j< 59)',		# b^c for next round
	'&xor ($b,$c) if ($j< 59)',		# c^d for next round
	'&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
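
# Unlike the BODY_* subs earlier in this file, body_00_19/body_20_39/
# body_40_59 do not append to $code themselves: each returns a list of
# strings, one perlasm call per scalar instruction. The Xupdate_* subs
# flatten several of these bodies into @insns and then eval one
# element at a time between SIMD instructions, which is how the
# integer rounds get interleaved with the message-schedule
# computation, e.g. (a hedged sketch of the driving pattern):
#
#	my @insns = (&$body,&$body,&$body,&$body);
#	eval(shift(@insns));	# emit a single scalar instruction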
$code.=<<___;
.align 16
.Loop_ssse3:
___
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

$code.=<<___;
	add 0($ctx),$A			# update context
	add 4($ctx),@T[0]
	add 8($ctx),$C
	add 12($ctx),$D
	mov $A,0($ctx)
	add 16($ctx),$E
	mov @T[0],4($ctx)
	mov @T[0],$B			# magic seed
	mov $C,8($ctx)
	mov $C,@T[1]
	mov $D,12($ctx)
	xor $D,@T[1]
	mov $E,16($ctx)
	and @T[1],@T[0]
	jmp .Loop_ssse3

.align 16
.Ldone_ssse3:
___
	$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

$code.=<<___;
	add 0($ctx),$A			# update context
	add 4($ctx),@T[0]
	add 8($ctx),$C
	mov $A,0($ctx)
	add 12($ctx),$D
	mov @T[0],4($ctx)
	add 16($ctx),$E
	mov $C,8($ctx)
	mov $D,12($ctx)
	mov $E,16($ctx)
___
$code.=<<___ if ($win64);
	movaps -40-6*16($fp),%xmm6
	movaps -40-5*16($fp),%xmm7
	movaps -40-4*16($fp),%xmm8
	movaps -40-3*16($fp),%xmm9
	movaps -40-2*16($fp),%xmm10
	movaps -40-1*16($fp),%xmm11
___
$code.=<<___;
	mov -40($fp),%r14
.cfi_restore %r14
	mov -32($fp),%r13
.cfi_restore %r13
	mov -24($fp),%r12
.cfi_restore %r12
	mov -16($fp),%rbp
.cfi_restore %rbp
	mov -8($fp),%rbx
.cfi_restore %rbx
	lea ($fp),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
	ret
.cfi_endproc
.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
___
if ($avx) {
$Xi=4;				# reset variables
@X=map("%xmm$_",(4..7,0..3));
@Tx=map("%xmm$_",(8..10));
$j=0;
$rx=0;

my $done_avx_label=".Ldone_avx";

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };

$code.=<<___;
.type sha1_block_data_order_avx,\@function,3
.align 16
sha1_block_data_order_avx:
_avx_shortcut:
.cfi_startproc
	mov %rsp,$fp
.cfi_def_cfa_register $fp
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13		# redundant, done to share Win64 SE handler
.cfi_push %r13
	push %r14
.cfi_push %r14
	lea `-64-($win64?6*16:0)`(%rsp),%rsp
	vzeroupper
___
$code.=<<___ if ($win64);
	vmovaps %xmm6,-40-6*16($fp)
	vmovaps %xmm7,-40-5*16($fp)
	vmovaps %xmm8,-40-4*16($fp)
	vmovaps %xmm9,-40-3*16($fp)
	vmovaps %xmm10,-40-2*16($fp)
	vmovaps %xmm11,-40-1*16($fp)
.Lprologue_avx:
___
$code.=<<___;
	and \$-64,%rsp
	mov %rdi,$ctx		# reassigned argument
	mov %rsi,$inp		# reassigned argument
	mov %rdx,$num		# reassigned argument

	shl \$6,$num
	add $inp,$num
	lea K_XX_XX+64(%rip),$K_XX_XX

	mov 0($ctx),$A		# load context
	mov 4($ctx),$B
	mov 8($ctx),$C
	mov 12($ctx),$D
	mov $B,@T[0]		# magic seed
	mov 16($ctx),$E
	mov $C,@T[1]
	xor $D,@T[1]
	and @T[1],@T[0]

	vmovdqa 64($K_XX_XX),@X[2]	# pbswap mask
	vmovdqa -64($K_XX_XX),$Kx	# K_00_19
	vmovdqu 0($inp),@X[-4&7]	# load input to %xmm[0-3]
	vmovdqu 16($inp),@X[-3&7]
	vmovdqu 32($inp),@X[-2&7]
	vmovdqu 48($inp),@X[-1&7]
	vpshufb @X[2],@X[-4&7],@X[-4&7]	# byte swap
	add \$64,$inp
	vpshufb @X[2],@X[-3&7],@X[-3&7]
	vpshufb @X[2],@X[-2&7],@X[-2&7]
	vpshufb @X[2],@X[-1&7],@X[-1&7]
	vpaddd $Kx,@X[-4&7],@X[0]	# add K_00_19
	vpaddd $Kx,@X[-3&7],@X[1]
	vpaddd $Kx,@X[-2&7],@X[2]
	vmovdqa @X[0],0(%rsp)		# X[]+K xfer to IALU
	vmovdqa @X[1],16(%rsp)
	vmovdqa @X[2],32(%rsp)
	jmp .Loop_avx
___
sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpaddd (@Tx[1],$Kx,@X[-1&7]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor (@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld (@Tx[0],@X[0],31);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd (@X[0],@X[0],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld (@Tx[1],@Tx[2],30);
	&vpor (@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslld (@Tx[2],@Tx[2],2);
	&vpxor (@X[0],@X[0],@Tx[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa ($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0);	# K_XX_XX
	eval(shift(@insns));
	eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor (@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol

	&vpxor (@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	eval(shift(@insns));
	eval(shift(@insns))		if (@insns[0] !~ /&ro[rl]/);
	&vpaddd (@Tx[1],$Kx,@X[-1&7]);
	&vmovdqa ($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)") if ($Xi%5==0);
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol

	&vpsrld (@Tx[0],@X[0],30);
	&vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpslld (@X[0],@X[0],2);
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpor (@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	&vpaddd (@Tx[1],$Kx,@X[-1&7]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&cmp ($inp,$num);
	&je ($done_avx_label);

	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
	&vmovdqa($Kx,"-64($K_XX_XX)");		# K_00_19
	&vmovdqu(@X[-4&7],"0($inp)");		# load input
	&vmovdqu(@X[-3&7],"16($inp)");
	&vmovdqu(@X[-2&7],"32($inp)");
	&vmovdqu(@X[-1&7],"48($inp)");
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&add ($inp,64);

  $Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd (@X[$Xi&7],@X[($Xi-4)&7],$Kx);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
$code.=<<___;
.align 16
.Loop_avx:
___
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

$code.=<<___;
	add 0($ctx),$A			# update context
	add 4($ctx),@T[0]
	add 8($ctx),$C
	add 12($ctx),$D
	mov $A,0($ctx)
	add 16($ctx),$E
	mov @T[0],4($ctx)
	mov @T[0],$B			# magic seed
	mov $C,8($ctx)
	mov $C,@T[1]
	mov $D,12($ctx)
	xor $D,@T[1]
	mov $E,16($ctx)
	and @T[1],@T[0]
	jmp .Loop_avx

.align 16
$done_avx_label:
___
	$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

$code.=<<___;
	vzeroupper

	add 0($ctx),$A			# update context
	add 4($ctx),@T[0]
	add 8($ctx),$C
	mov $A,0($ctx)
	add 12($ctx),$D
	mov @T[0],4($ctx)
	add 16($ctx),$E
	mov $C,8($ctx)
	mov $D,12($ctx)
	mov $E,16($ctx)
___
$code.=<<___ if ($win64);
	movaps -40-6*16($fp),%xmm6
	movaps -40-5*16($fp),%xmm7
	movaps -40-4*16($fp),%xmm8
	movaps -40-3*16($fp),%xmm9
	movaps -40-2*16($fp),%xmm10
	movaps -40-1*16($fp),%xmm11
___
$code.=<<___;
	mov -40($fp),%r14
.cfi_restore %r14
	mov -32($fp),%r13
.cfi_restore %r13
	mov -24($fp),%r12
.cfi_restore %r12
	mov -16($fp),%rbp
.cfi_restore %rbp
	mov -8($fp),%rbx
.cfi_restore %rbx
	lea ($fp),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
	ret
.cfi_endproc
.size sha1_block_data_order_avx,.-sha1_block_data_order_avx
___
if ($avx>1) {
use integer;
$Xi=4;				# reset variables
@X=map("%ymm$_",(4..7,0..3));
@Tx=map("%ymm$_",(8..10));
$Kx="%ymm11";
$j=0;

my @ROTX=("%eax","%ebp","%ebx","%ecx","%edx","%esi");
my ($a5,$t0)=("%r12d","%edi");

my ($A,$F,$B,$C,$D,$E)=@ROTX;
my $rx=0;
my $frame="%r13";

$code.=<<___;
.type sha1_block_data_order_avx2,\@function,3
.align 16
sha1_block_data_order_avx2:
_avx2_shortcut:
.cfi_startproc
	mov %rsp,$fp
.cfi_def_cfa_register $fp
	push %rbx
.cfi_push %rbx
	push %rbp
.cfi_push %rbp
	push %r12
.cfi_push %r12
	push %r13
.cfi_push %r13
	push %r14
.cfi_push %r14
	vzeroupper
___
$code.=<<___ if ($win64);
	lea -6*16(%rsp),%rsp
	vmovaps %xmm6,-40-6*16($fp)
	vmovaps %xmm7,-40-5*16($fp)
	vmovaps %xmm8,-40-4*16($fp)
	vmovaps %xmm9,-40-3*16($fp)
	vmovaps %xmm10,-40-2*16($fp)
	vmovaps %xmm11,-40-1*16($fp)
.Lprologue_avx2:
___
$code.=<<___;
	mov %rdi,$ctx		# reassigned argument
	mov %rsi,$inp		# reassigned argument
	mov %rdx,$num		# reassigned argument

	lea -640(%rsp),%rsp
	shl \$6,$num
	lea 64($inp),$frame
	and \$-128,%rsp
	add $inp,$num
	lea K_XX_XX+64(%rip),$K_XX_XX

	mov 0($ctx),$A		# load context
	cmp $num,$frame
	cmovae $inp,$frame	# next or same block
	mov 4($ctx),$F
	mov 8($ctx),$C
	mov 12($ctx),$D
	mov 16($ctx),$E
	vmovdqu 64($K_XX_XX),@X[2]	# pbswap mask

	vmovdqu ($inp),%xmm0
	vmovdqu 16($inp),%xmm1
	vmovdqu 32($inp),%xmm2
	vmovdqu 48($inp),%xmm3
	lea 64($inp),$inp
	vinserti128 \$1,($frame),@X[-4&7],@X[-4&7]
	vinserti128 \$1,16($frame),@X[-3&7],@X[-3&7]
	vpshufb @X[2],@X[-4&7],@X[-4&7]
	vinserti128 \$1,32($frame),@X[-2&7],@X[-2&7]
	vpshufb @X[2],@X[-3&7],@X[-3&7]
	vinserti128 \$1,48($frame),@X[-1&7],@X[-1&7]
	vpshufb @X[2],@X[-2&7],@X[-2&7]
	vmovdqu -64($K_XX_XX),$Kx	# K_00_19
	vpshufb @X[2],@X[-1&7],@X[-1&7]

	vpaddd $Kx,@X[-4&7],@X[0]	# add K_00_19
	vpaddd $Kx,@X[-3&7],@X[1]
	vmovdqu @X[0],0(%rsp)		# X[]+K xfer to IALU
	vpaddd $Kx,@X[-2&7],@X[2]
	vmovdqu @X[1],32(%rsp)
	vpaddd $Kx,@X[-1&7],@X[3]
	vmovdqu @X[2],64(%rsp)
	vmovdqu @X[3],96(%rsp)
___
for (;$Xi<8;$Xi++) {		# Xupdate_avx2_16_31
    use integer;

	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
	&vpxor (@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	&vpxor (@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	&vpxor (@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	&vpsrld (@Tx[0],@X[0],31);
	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0);	# K_XX_XX
	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd (@X[0],@X[0],@X[0]);
	&vpsrld (@Tx[1],@Tx[2],30);
	&vpor (@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
	&vpslld (@Tx[2],@Tx[2],2);
	&vpxor (@X[0],@X[0],@Tx[1]);
	&vpxor (@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
	&vpaddd (@Tx[1],@X[0],$Kx);
	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU

	push(@X,shift(@X));	# "rotate" X[]
}
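
# The four Xupdate_avx2_16_31 steps above ($Xi 4..7) are peeled out of
# the round loop and executed up front as straight-line SIMD code.
# This appears to be the "number of Xupdate iterations to pre-compute
# in advance" hint credited to Ilya Albrekht in the March 2014 note:
# with two blocks packed per %ymm register, the message schedule has
# to stay well ahead of the scalar rounds that consume the X[]+K
# transfers on the stack.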
$code.=<<___;
	lea 128(%rsp),$frame
	jmp .Loop_avx2

.align 32
.Loop_avx2:
	rorx \$2,$F,$B
	andn $D,$F,$t0
	and $C,$F
	xor $t0,$F
___
sub bodyx_00_19 () {	# 8 instructions, 3 cycles critical path
	# at start $f=(b&c)^(~b&d), $b>>>=2
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
	'&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
	'&lea ($frame,"256($frame)") if ($j%32==31);',
	'&andn ($t0,$a,$c)',			# ~b&d for next round
	'&add ($e,$f)',				# e+=(b&c)^(~b&d)
	'&rorx ($a5,$a,27)',			# a<<<5
	'&rorx ($f,$a,2)',			# b>>>2 for next round
	'&and ($a,$b)',				# b&c for next round
	'&add ($e,$a5)',			# e+=a<<<5
	'&xor ($a,$t0);'.			# f=(b&c)^(~b&d) for next round
	'unshift(@ROTX,pop(@ROTX)); $j++;'
	)
}
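
# Documentation-only sketch (illustrative helper, never called): with
# BMI, Ch(b,c,d) = (b&c)^(~b&d) costs only andn+and+xor, since andn
# computes ~b&d in one instruction, and rorx produces the rotates
# without touching the flags; hence the short critical path noted
# above.
sub _ref_Ch_bmi { my ($b,$c,$d)=@_; return (($b & $c) ^ (~$b & $d)) & 0xffffffff; }	# hypothetical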
sub bodyx_20_39 () {	# 7 instructions, 2 cycles critical path
	# on entry $f=b^c^d, $b>>>=2
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
	'&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
	'&lea ($frame,"256($frame)") if ($j%32==31);',
	'&lea ($e,"($e,$f)")',			# e+=b^c^d
	'&rorx ($a5,$a,27)',			# a<<<5
	'&rorx ($f,$a,2) if ($j<79)',		# b>>>2 in next round
	'&xor ($a,$b) if ($j<79)',		# b^c for next round
	'&add ($e,$a5)',			# e+=a<<<5
	'&xor ($a,$c) if ($j<79);'.		# f=b^c^d for next round
	'unshift(@ROTX,pop(@ROTX)); $j++;'
	)
}

sub bodyx_40_59 () {	# 10 instructions, 3 cycles critical path
	# on entry $f=((b^c)&(c^d)), $b>>>=2
	$rx++;
	(
	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
	'&add ($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
	'&lea ($frame,"256($frame)") if ($j%32==31);',
	'&xor ($f,$c) if ($j>39)',		# (b^c)&(c^d)^c
	'&mov ($t0,$b) if ($j<59)',		# count on zero latency
	'&xor ($t0,$c) if ($j<59)',		# c^d for next round
	'&lea ($e,"($e,$f)")',			# e+=(b^c)&(c^d)^c
	'&rorx ($a5,$a,27)',			# a<<<5
	'&rorx ($f,$a,2)',			# b>>>2 in next round
	'&xor ($a,$b)',				# b^c for next round
	'&add ($e,$a5)',			# e+=a<<<5
	'&and ($a,$t0) if ($j< 59);'.		# f=(b^c)&(c^d) for next round
	'&xor ($a,$c) if ($j==59);'.		# f=b^c^d for next round
	'unshift(@ROTX,pop(@ROTX)); $j++;'
	)
}
sub Xupdate_avx2_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	&vpxor (@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld (@Tx[0],@X[0],31);
	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)") if ($Xi%5==0);	# K_XX_XX
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd (@X[0],@X[0],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld (@Tx[1],@Tx[2],30);
	&vpor (@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslld (@Tx[2],@Tx[2],2);
	&vpxor (@X[0],@X[0],@Tx[1]);
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpaddd (@Tx[1],@X[0],$Kx);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqu(eval(32*($Xi))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;
  push(@X,shift(@X));	# "rotate" X[]
}
sub Xupdate_avx2_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 to 50 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor (@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	&vmovdqu($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)") if ($Xi%5==0);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor (@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld (@Tx[0],@X[0],30);
	&vpslld (@X[0],@X[0],2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	#&vpslld (@X[0],@X[0],2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpor (@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpaddd (@Tx[1],@X[0],$Kx);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;
  push(@X,shift(@X));	# "rotate" X[]
}
sub Xloop_avx2()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

	&align32();
	&Xupdate_avx2_32_79(\&bodyx_00_19);
	&Xupdate_avx2_32_79(\&bodyx_00_19);
	&Xupdate_avx2_32_79(\&bodyx_00_19);
	&Xupdate_avx2_32_79(\&bodyx_00_19);

	&Xupdate_avx2_32_79(\&bodyx_20_39);
	&Xupdate_avx2_32_79(\&bodyx_20_39);
	&Xupdate_avx2_32_79(\&bodyx_20_39);
	&Xupdate_avx2_32_79(\&bodyx_20_39);

	&align32();
	&Xupdate_avx2_32_79(\&bodyx_40_59);
	&Xupdate_avx2_32_79(\&bodyx_40_59);
	&Xupdate_avx2_32_79(\&bodyx_40_59);
	&Xupdate_avx2_32_79(\&bodyx_40_59);

	&Xloop_avx2(\&bodyx_20_39);
	&Xloop_avx2(\&bodyx_20_39);
	&Xloop_avx2(\&bodyx_20_39);
	&Xloop_avx2(\&bodyx_20_39);

$code.=<<___;
	lea 128($inp),$frame
	lea 128($inp),%rdi	# borrow $t0
	cmp $num,$frame
	cmovae $inp,$frame	# next or previous block

	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
	add 0($ctx),@ROTX[0]	# update context
	add 4($ctx),@ROTX[1]
	add 8($ctx),@ROTX[3]
	mov @ROTX[0],0($ctx)
	add 12($ctx),@ROTX[4]
	mov @ROTX[1],4($ctx)
	mov @ROTX[0],$A		# A=d
	add 16($ctx),@ROTX[5]
	mov @ROTX[3],$a5
	mov @ROTX[3],8($ctx)
	mov @ROTX[4],$D		# D=b
	#xchg @ROTX[5],$F	# F=c, C=f
	mov @ROTX[4],12($ctx)
	mov @ROTX[1],$F		# F=e
	mov @ROTX[5],16($ctx)
	#mov $F,16($ctx)
	mov @ROTX[5],$E		# E=c
	mov $a5,$C		# C=f
	#xchg $F,$E		# E=c, F=e

	cmp $num,$inp
	je .Ldone_avx2
___
$Xi=4;				# reset variables
@X=map("%ymm$_",(4..7,0..3));

$code.=<<___;
	vmovdqu	64($K_XX_XX),@X[2]	# pbswap mask
	cmp	$num,%rdi		# borrowed $t0
	ja	.Last_avx2

	vmovdqu		-64(%rdi),%xmm0	# low part of @X[-4&7]
	vmovdqu		-48(%rdi),%xmm1
	vmovdqu		-32(%rdi),%xmm2
	vmovdqu		-16(%rdi),%xmm3
	vinserti128	\$1,0($frame),@X[-4&7],@X[-4&7]
	vinserti128	\$1,16($frame),@X[-3&7],@X[-3&7]
	vinserti128	\$1,32($frame),@X[-2&7],@X[-2&7]
	vinserti128	\$1,48($frame),@X[-1&7],@X[-1&7]
	jmp	.Last_avx2

.align	32
.Last_avx2:
	lea	128+16(%rsp),$frame
	rorx	\$2,$F,$B		# B=F>>>2, pre-rotated for next round
	andn	$D,$F,$t0		# ~F & D
	and	$C,$F			# F & C
	xor	$t0,$F			# Ch(F,C,D)=(F&C)^(~F&D)
	sub	\$-128,$inp		# advance input; -128 fits in 8-bit imm
___
$rx=$j=0;	@ROTX=($A,$F,$B,$C,$D,$E);

	&Xloop_avx2	(\&bodyx_00_19);
	&Xloop_avx2	(\&bodyx_00_19);
	&Xloop_avx2	(\&bodyx_00_19);
	&Xloop_avx2	(\&bodyx_00_19);
	&Xloop_avx2	(\&bodyx_20_39);
	  &vmovdqu	($Kx,"-64($K_XX_XX)");		# K_00_19
	  &vpshufb	(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&Xloop_avx2	(\&bodyx_20_39);
	  &vpshufb	(@X[-3&7],@X[-3&7],@X[2]);
	  &vpaddd	(@Tx[0],@X[-4&7],$Kx);		# add K_00_19
	&Xloop_avx2	(\&bodyx_20_39);
	  &vmovdqu	("0(%rsp)",@Tx[0]);
	  &vpshufb	(@X[-2&7],@X[-2&7],@X[2]);
	  &vpaddd	(@Tx[1],@X[-3&7],$Kx);
	&Xloop_avx2	(\&bodyx_20_39);
	  &vmovdqu	("32(%rsp)",@Tx[1]);
	  &vpshufb	(@X[-1&7],@X[-1&7],@X[2]);
	  &vpaddd	(@X[2],@X[-2&7],$Kx);
	&Xloop_avx2	(\&bodyx_40_59);
	&align32	();
	  &vmovdqu	("64(%rsp)",@X[2]);
	  &vpaddd	(@X[3],@X[-1&7],$Kx);
	&Xloop_avx2	(\&bodyx_40_59);
	  &vmovdqu	("96(%rsp)",@X[3]);
	&Xloop_avx2	(\&bodyx_40_59);
	&Xupdate_avx2_16_31(\&bodyx_40_59);
	&Xupdate_avx2_16_31(\&bodyx_20_39);
	&Xupdate_avx2_16_31(\&bodyx_20_39);
	&Xupdate_avx2_16_31(\&bodyx_20_39);
	&Xloop_avx2	(\&bodyx_20_39);

$code.=<<___;
	lea	128(%rsp),$frame

	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
	add	0($ctx),@ROTX[0]	# update context
	add	4($ctx),@ROTX[1]
	add	8($ctx),@ROTX[3]
	mov	@ROTX[0],0($ctx)
	add	12($ctx),@ROTX[4]
	mov	@ROTX[1],4($ctx)
	mov	@ROTX[0],$A		# A=d
	add	16($ctx),@ROTX[5]
	mov	@ROTX[3],$a5
	mov	@ROTX[3],8($ctx)
	mov	@ROTX[4],$D		# D=b
	#xchg	@ROTX[5],$F		# F=c, C=f
	mov	@ROTX[4],12($ctx)
	mov	@ROTX[1],$F		# F=e
	mov	@ROTX[5],16($ctx)
	#mov	$F,16($ctx)
	mov	@ROTX[5],$E		# E=c
	mov	$a5,$C			# C=f
	#xchg	$F,$E			# E=c, F=e

	cmp	$num,$inp
	jbe	.Loop_avx2
.Ldone_avx2:
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-40-6*16($fp),%xmm6
	movaps	-40-5*16($fp),%xmm7
	movaps	-40-4*16($fp),%xmm8
	movaps	-40-3*16($fp),%xmm9
	movaps	-40-2*16($fp),%xmm10
	movaps	-40-1*16($fp),%xmm11
___
$code.=<<___;
	mov	-40($fp),%r14
.cfi_restore	%r14
	mov	-32($fp),%r13
.cfi_restore	%r13
	mov	-24($fp),%r12
.cfi_restore	%r12
	mov	-16($fp),%rbp
.cfi_restore	%rbp
	mov	-8($fp),%rbx
.cfi_restore	%rbx
	lea	($fp),%rsp
.cfi_def_cfa_register	%rsp
.Lepilogue_avx2:
	ret
.cfi_endproc
.size	sha1_block_data_order_avx2,.-sha1_block_data_order_avx2
___
}
}
$code.=<<___;
.align	64
K_XX_XX:
.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
.byte	0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
___
}}}
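
# Each round constant in the K_XX_XX table is replicated across a
# full ymm width (two 16-byte .long rows per constant).  As a
# reminder of where these "nothing-up-my-sleeve" values come from, a
# throw-away helper (our own addition, never called; assumes 64-bit
# Perl, where the double-precision sqrt is more than accurate enough
# for a 32-bit floor) rederives them as floor(2^30*sqrt(n)):
sub K_derive {
    return map { int(sqrt($_)*(1<<30)) & 0xffffffff } (2,3,5,10);
    # 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
}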
$code.=<<___;
.asciz	"SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	mov	`16*4`(%rax),%rax	# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler
___
$code.=<<___ if ($shaext);
.type	shaext_handler,\@abi-omnipotent
.align	16
shaext_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue_shaext(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue_shaext
	jb	.Lcommon_seh_tail

	lea	.Lepilogue_shaext(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue_shaext
	jae	.Lcommon_seh_tail

	lea	-8-4*16(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$8,%ecx		# 4 xmm registers = 8 qwords
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	shaext_handler,.-shaext_handler
___
$code.=<<___;
.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-40-6*16(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$12,%ecx		# 6 xmm registers = 12 qwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)/8, movsq count
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	ssse3_handler,.-ssse3_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_sha1_block_data_order
	.rva	.LSEH_end_sha1_block_data_order
	.rva	.LSEH_info_sha1_block_data_order
___
$code.=<<___ if ($shaext);
	.rva	.LSEH_begin_sha1_block_data_order_shaext
	.rva	.LSEH_end_sha1_block_data_order_shaext
	.rva	.LSEH_info_sha1_block_data_order_shaext
___
$code.=<<___;
	.rva	.LSEH_begin_sha1_block_data_order_ssse3
	.rva	.LSEH_end_sha1_block_data_order_ssse3
	.rva	.LSEH_info_sha1_block_data_order_ssse3
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_sha1_block_data_order_avx
	.rva	.LSEH_end_sha1_block_data_order_avx
	.rva	.LSEH_info_sha1_block_data_order_avx
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_sha1_block_data_order_avx2
	.rva	.LSEH_end_sha1_block_data_order_avx2
	.rva	.LSEH_info_sha1_block_data_order_avx2
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_sha1_block_data_order:
	.byte	9,0,0,0
	.rva	se_handler
___
$code.=<<___ if ($shaext);
.LSEH_info_sha1_block_data_order_shaext:
	.byte	9,0,0,0
	.rva	shaext_handler
___
$code.=<<___;
.LSEH_info_sha1_block_data_order_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
___
$code.=<<___ if ($avx);
.LSEH_info_sha1_block_data_order_avx:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_sha1_block_data_order_avx2:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
___
}
####################################################################

sub sha1rnds4 {
    if ($_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-7]),\s*%xmm([0-7])/) {
	my @opcode=(0x0f,0x3a,0xcc);
	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
	my $c=$1;
	push @opcode,$c=~/^0/?oct($c):$c;	# 0x.../0... constant or decimal
	return ".byte\t".join(',',@opcode);
    } else {
	return "sha1rnds4\t".$_[0];
    }
}
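
# Throw-away sanity check for the encoder above (an illustrative
# addition of ours; the sub name is hypothetical and nothing in the
# generator calls it).  sha1rnds4 is 0f 3a cc /r ib with
# ModR/M = 0xc0|src|(dst<<3) and the immediate selecting one of the
# four round-function groups; join() renders the bytes in decimal.
sub sha1rnds4_selftest {
    my $out = sha1rnds4('$3,%xmm4,%xmm0');
    die "bad sha1rnds4 encoding: $out"
	unless $out eq ".byte\t15,58,204,196,3";	# 0f 3a cc c4 03
    return 1;
}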
sub sha1op38 {
    my $instr = shift;
    my %opcodelet = (
		"sha1nexte" => 0xc8,
		"sha1msg1"  => 0xc9,
		"sha1msg2"  => 0xca	);

    if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
	my @opcode=(0x0f,0x38);
	my $rex=0;
	$rex|=0x04			if ($2>=8);
	$rex|=0x01			if ($1>=8);
	unshift @opcode,0x40|$rex	if ($rex);
	push @opcode,$opcodelet{$instr};
	push @opcode,0xc0|($1&7)|(($2&7)<<3);	# ModR/M
	return ".byte\t".join(',',@opcode);
    } else {
	return $instr."\t".$_[0];
    }
}
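
# Matching throw-away check for the two-operand encoder (again our
# own hypothetical helper, never called): "sha1nexte %xmm8,%xmm0"
# must grow a REX prefix (0x41, i.e. REX.B) because the source
# register index is >=8, while the low three bits land in ModR/M.
sub sha1op38_selftest {
    my $out = sha1op38("sha1nexte","%xmm8,%xmm0");
    die "bad sha1nexte encoding: $out"
	unless $out eq ".byte\t65,15,56,200,192";	# 41 0f 38 c8 c0
    return 1;
}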
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo	or
	s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo;

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";