sha1-mb-x86_64.pl

  1. #! /usr/bin/env perl
  2. # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the OpenSSL license (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. # ====================================================================
  9. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  10. # project. The module is, however, dual licensed under OpenSSL and
  11. # CRYPTOGAMS licenses depending on where you obtain it. For further
  12. # details see http://www.openssl.org/~appro/cryptogams/.
  13. # ====================================================================
  14. # Multi-buffer SHA1 procedure processes n buffers in parallel by
  15. # placing each buffer's data into a designated lane of a SIMD register. n is
  16. # naturally limited to 4 on pre-AVX2 processors and to 8 on
  17. # AVX2-capable processors such as Haswell.
  18. #
  19. #               this            +aesni(i)        sha1    aesni-sha1   gain(iv)
  20. # -------------------------------------------------------------------
  21. # Westmere(ii)  10.7/n          +1.28=3.96(n=4)  5.30    6.66         +68%
  22. # Atom(ii)      18.1/n          +3.93=8.46(n=4)  9.37    12.8         +51%
  23. # Sandy Bridge  (8.16           +5.15=13.3)/n    4.99    5.98         +80%
  24. # Ivy Bridge    (8.08           +5.14=13.2)/n    4.60    5.54         +68%
  25. # Haswell(iii)  (8.96           +5.00=14.0)/n    3.57    4.55         +160%
  26. # Skylake       (8.70           +5.00=13.7)/n    3.64    4.20         +145%
  27. # Bulldozer     (9.76           +5.76=15.5)/n    5.95    6.37         +64%
  28. #
  29. # (i) multi-block CBC encrypt with 128-bit key;
  30. # (ii) (HASH+AES)/n does not apply to Westmere for n>3 and Atom,
  31. # because of lower AES-NI instruction throughput;
  32. # (iii) "this" is for n=8, when we gather twice as much data, result
  33. # for n=4 is 8.00+4.44=12.4;
  34. # (iv) presented improvement coefficients are asymptotic limits and
  35. # in real-life application are somewhat lower, e.g. for 2KB
  36. # fragments they range from 30% to 100% (on Haswell);
  37. $flavour = shift;
  38. $output = shift;
  39. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  40. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  41. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  42. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  43. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  44. die "can't locate x86_64-xlate.pl";
  45. $avx=0;
  46. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  47. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  48. $avx = ($1>=2.19) + ($1>=2.22);
  49. }
  50. if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  51. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
  52. $avx = ($1>=2.09) + ($1>=2.10);
  53. }
  54. if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  55. `ml64 2>&1` =~ /Version ([0-9]+)\./) {
  56. $avx = ($1>=10) + ($1>=11);
  57. }
  58. if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
  59. $avx = ($2>=3.0) + ($2>3.0);
  60. }
  61. open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
  62. *STDOUT=*OUT;
  63. # void sha1_multi_block (
  64. # struct { unsigned int A[8];
  65. # unsigned int B[8];
  66. # unsigned int C[8];
  67. # unsigned int D[8];
  68. # unsigned int E[8]; } *ctx,
  69. # struct { void *ptr; int blocks; } inp[8],
  70. # int num); /* 1 or 2 */
  71. #
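# The context above is laid out as a structure of arrays, always eight lanes
# wide, so lane i of whichever code path runs updates A[i],B[i],...,E[i]
# independently of the other lanes. As an illustration only (the helper below
# is hypothetical and not used by this module; real callers live in C), a
# freshly initialized multi-buffer context could be built in Perl as:
sub _mb_ctx_new_example {
    my @iv = (0x67452301,0xefcdab89,0x98badcfe,0x10325476,0xc3d2e1f0);
    # A[8],B[8],C[8],D[8],E[8], every lane starting from the SHA-1 IV
    return pack("V8" x 5, map { ($_) x 8 } @iv);
}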
  72. $ctx="%rdi"; # 1st arg
  73. $inp="%rsi"; # 2nd arg
  74. $num="%edx";
  75. @ptr=map("%r$_",(8..11));
  76. $Tbl="%rbp";
  77. @V=($A,$B,$C,$D,$E)=map("%xmm$_",(0..4));
  78. ($t0,$t1,$t2,$t3,$tx)=map("%xmm$_",(5..9));
  79. @Xi=map("%xmm$_",(10..14));
  80. $K="%xmm15";
  81. if (1) {
  82. # Atom-specific optimization aiming to eliminate pshufb with high
  83. # registers [and thus get rid of the 48-cycle accumulated penalty]
  84. @Xi=map("%xmm$_",(0..4));
  85. ($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
  86. @V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
  87. }
  88. $REG_SZ=16;
  89. sub Xi_off {
  90. my $off = shift;
  91. $off %= 16; $off *= $REG_SZ;
  92. $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
  93. }
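# Xi_off(i) addresses entry i%16 of the ring buffer of expanded message
# words kept on the stack. %rax is preloaded with %rsp+128 (and, in the
# AVX2 path, %rbx with %rsp+256+128), so both arms resolve to %rsp+$off
# while the +/-128 bias keeps most displacements within a signed byte.
# With $REG_SZ==16 the offset never reaches 256, so only the %rax arm is
# used, e.g. Xi_off(15) yields "240-128(%rax)"; with $REG_SZ==32,
# Xi_off(15) yields "480-256-128(%rbx)".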
  94. sub BODY_00_19 {
  95. my ($i,$a,$b,$c,$d,$e)=@_;
  96. my $j=$i+1;
  97. my $k=$i+2;
  98. # Loads are performed 2+3/4 iterations in advance. The 3/4 means that of
  99. # the 4 words you would expect to be loaded in a given iteration, one is
  100. # spilled to the next iteration. In other words, the word indices in the
  101. # four input streams are distributed as follows:
  102. #
  103. # $i==0: 0,0,0,0,1,1,1,1,2,2,2,
  104. # $i==1: 2,3,3,3,
  105. # $i==2: 3,4,4,4,
  106. # ...
  107. # $i==13: 14,15,15,15,
  108. # $i==14: 15
  109. #
  110. # Then at $i==15 Xupdate is applied one iteration in advance...
  111. $code.=<<___ if ($i==0);
  112. movd (@ptr[0]),@Xi[0]
  113. lea `16*4`(@ptr[0]),@ptr[0]
  114. movd (@ptr[1]),@Xi[2] # borrow @Xi[2]
  115. lea `16*4`(@ptr[1]),@ptr[1]
  116. movd (@ptr[2]),@Xi[3] # borrow @Xi[3]
  117. lea `16*4`(@ptr[2]),@ptr[2]
  118. movd (@ptr[3]),@Xi[4] # borrow @Xi[4]
  119. lea `16*4`(@ptr[3]),@ptr[3]
  120. punpckldq @Xi[3],@Xi[0]
  121. movd `4*$j-16*4`(@ptr[0]),@Xi[1]
  122. punpckldq @Xi[4],@Xi[2]
  123. movd `4*$j-16*4`(@ptr[1]),$t3
  124. punpckldq @Xi[2],@Xi[0]
  125. movd `4*$j-16*4`(@ptr[2]),$t2
  126. pshufb $tx,@Xi[0]
  127. ___
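# The movd/punpckldq ladder above transposes word 0 of the four input
# streams into a single register (stream k ends up in dword lane k) and
# pshufb converts it to big-endian; loads of the following words are
# interleaved with the rounds according to the scheduling note above.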
  128. $code.=<<___ if ($i<14); # just load input
  129. movd `4*$j-16*4`(@ptr[3]),$t1
  130. punpckldq $t2,@Xi[1]
  131. movdqa $a,$t2
  132. paddd $K,$e # e+=K_00_19
  133. punpckldq $t1,$t3
  134. movdqa $b,$t1
  135. movdqa $b,$t0
  136. pslld \$5,$t2
  137. pandn $d,$t1
  138. pand $c,$t0
  139. punpckldq $t3,@Xi[1]
  140. movdqa $a,$t3
  141. movdqa @Xi[0],`&Xi_off($i)`
  142. paddd @Xi[0],$e # e+=X[i]
  143. movd `4*$k-16*4`(@ptr[0]),@Xi[2]
  144. psrld \$27,$t3
  145. pxor $t1,$t0 # Ch(b,c,d)
  146. movdqa $b,$t1
  147. por $t3,$t2 # rol(a,5)
  148. movd `4*$k-16*4`(@ptr[1]),$t3
  149. pslld \$30,$t1
  150. paddd $t0,$e # e+=Ch(b,c,d)
  151. psrld \$2,$b
  152. paddd $t2,$e # e+=rol(a,5)
  153. pshufb $tx,@Xi[1]
  154. movd `4*$k-16*4`(@ptr[2]),$t2
  155. por $t1,$b # b=rol(b,30)
  156. ___
  157. $code.=<<___ if ($i==14); # just load input
  158. movd `4*$j-16*4`(@ptr[3]),$t1
  159. punpckldq $t2,@Xi[1]
  160. movdqa $a,$t2
  161. paddd $K,$e # e+=K_00_19
  162. punpckldq $t1,$t3
  163. movdqa $b,$t1
  164. movdqa $b,$t0
  165. pslld \$5,$t2
  166. prefetcht0 63(@ptr[0])
  167. pandn $d,$t1
  168. pand $c,$t0
  169. punpckldq $t3,@Xi[1]
  170. movdqa $a,$t3
  171. movdqa @Xi[0],`&Xi_off($i)`
  172. paddd @Xi[0],$e # e+=X[i]
  173. psrld \$27,$t3
  174. pxor $t1,$t0 # Ch(b,c,d)
  175. movdqa $b,$t1
  176. prefetcht0 63(@ptr[1])
  177. por $t3,$t2 # rol(a,5)
  178. pslld \$30,$t1
  179. paddd $t0,$e # e+=Ch(b,c,d)
  180. prefetcht0 63(@ptr[2])
  181. psrld \$2,$b
  182. paddd $t2,$e # e+=rol(a,5)
  183. pshufb $tx,@Xi[1]
  184. prefetcht0 63(@ptr[3])
  185. por $t1,$b # b=rol(b,30)
  186. ___
  187. $code.=<<___ if ($i>=13 && $i<15);
  188. movdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
  189. ___
  190. $code.=<<___ if ($i>=15); # apply Xupdate
  191. pxor @Xi[-2],@Xi[1] # "X[13]"
  192. movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  193. movdqa $a,$t2
  194. pxor `&Xi_off($j+8)`,@Xi[1]
  195. paddd $K,$e # e+=K_00_19
  196. movdqa $b,$t1
  197. pslld \$5,$t2
  198. pxor @Xi[3],@Xi[1]
  199. movdqa $b,$t0
  200. pandn $d,$t1
  201. movdqa @Xi[1],$tx
  202. pand $c,$t0
  203. movdqa $a,$t3
  204. psrld \$31,$tx
  205. paddd @Xi[1],@Xi[1]
  206. movdqa @Xi[0],`&Xi_off($i)`
  207. paddd @Xi[0],$e # e+=X[i]
  208. psrld \$27,$t3
  209. pxor $t1,$t0 # Ch(b,c,d)
  210. movdqa $b,$t1
  211. por $t3,$t2 # rol(a,5)
  212. pslld \$30,$t1
  213. paddd $t0,$e # e+=Ch(b,c,d)
  214. psrld \$2,$b
  215. paddd $t2,$e # e+=rol(a,5)
  216. por $tx,@Xi[1] # rol \$1,@Xi[1]
  217. por $t1,$b # b=rol(b,30)
  218. ___
  219. push(@Xi,shift(@Xi));
  220. }
  221. sub BODY_20_39 {
  222. my ($i,$a,$b,$c,$d,$e)=@_;
  223. my $j=$i+1;
  224. $code.=<<___ if ($i<79);
  225. pxor @Xi[-2],@Xi[1] # "X[13]"
  226. movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  227. movdqa $a,$t2
  228. movdqa $d,$t0
  229. pxor `&Xi_off($j+8)`,@Xi[1]
  230. paddd $K,$e # e+=K_20_39
  231. pslld \$5,$t2
  232. pxor $b,$t0
  233. movdqa $a,$t3
  234. ___
  235. $code.=<<___ if ($i<72);
  236. movdqa @Xi[0],`&Xi_off($i)`
  237. ___
  238. $code.=<<___ if ($i<79);
  239. paddd @Xi[0],$e # e+=X[i]
  240. pxor @Xi[3],@Xi[1]
  241. psrld \$27,$t3
  242. pxor $c,$t0 # Parity(b,c,d)
  243. movdqa $b,$t1
  244. pslld \$30,$t1
  245. movdqa @Xi[1],$tx
  246. por $t3,$t2 # rol(a,5)
  247. psrld \$31,$tx
  248. paddd $t0,$e # e+=Parity(b,c,d)
  249. paddd @Xi[1],@Xi[1]
  250. psrld \$2,$b
  251. paddd $t2,$e # e+=rol(a,5)
  252. por $tx,@Xi[1] # rol(@Xi[1],1)
  253. por $t1,$b # b=rol(b,30)
  254. ___
  255. $code.=<<___ if ($i==79);
  256. movdqa $a,$t2
  257. paddd $K,$e # e+=K_20_39
  258. movdqa $d,$t0
  259. pslld \$5,$t2
  260. pxor $b,$t0
  261. movdqa $a,$t3
  262. paddd @Xi[0],$e # e+=X[i]
  263. psrld \$27,$t3
  264. movdqa $b,$t1
  265. pxor $c,$t0 # Parity(b,c,d)
  266. pslld \$30,$t1
  267. por $t3,$t2 # rol(a,5)
  268. paddd $t0,$e # e+=Parity(b,c,d)
  269. psrld \$2,$b
  270. paddd $t2,$e # e+=rol(a,5)
  271. por $t1,$b # b=rol(b,30)
  272. ___
  273. push(@Xi,shift(@Xi));
  274. }
  275. sub BODY_40_59 {
  276. my ($i,$a,$b,$c,$d,$e)=@_;
  277. my $j=$i+1;
  278. $code.=<<___;
  279. pxor @Xi[-2],@Xi[1] # "X[13]"
  280. movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  281. movdqa $a,$t2
  282. movdqa $d,$t1
  283. pxor `&Xi_off($j+8)`,@Xi[1]
  284. pxor @Xi[3],@Xi[1]
  285. paddd $K,$e # e+=K_40_59
  286. pslld \$5,$t2
  287. movdqa $a,$t3
  288. pand $c,$t1
  289. movdqa $d,$t0
  290. movdqa @Xi[1],$tx
  291. psrld \$27,$t3
  292. paddd $t1,$e
  293. pxor $c,$t0
  294. movdqa @Xi[0],`&Xi_off($i)`
  295. paddd @Xi[0],$e # e+=X[i]
  296. por $t3,$t2 # rol(a,5)
  297. psrld \$31,$tx
  298. pand $b,$t0
  299. movdqa $b,$t1
  300. pslld \$30,$t1
  301. paddd @Xi[1],@Xi[1]
  302. paddd $t0,$e # e+=Maj(b,d,c)
  303. psrld \$2,$b
  304. paddd $t2,$e # e+=rol(a,5)
  305. por $tx,@Xi[1] # rol(@X[1],1)
  306. por $t1,$b # b=rol(b,30)
  307. ___
  308. push(@Xi,shift(@Xi));
  309. }
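# For reference, each BODY_* generator above emits one SHA-1 round with
# every SIMD lane carrying an independent message stream. A scalar Perl
# sketch of the same per-lane round (illustrative only, not used by this
# module; assumes 32-bit unsigned inputs):
sub _sha1_round_model {
    my ($i,$a,$b,$c,$d,$e,$x) = @_;             # $x is the expanded word X[i]
    my @K = (0x5a827999,0x6ed9eba1,0x8f1bbcdc,0xca62c1d6);
    my $f = $i<20 ? (($b&$c)|(~$b&$d))          # Ch(b,c,d)
          : $i<40 ? ($b^$c^$d)                  # Parity(b,c,d)
          : $i<60 ? (($b&$c)|($b&$d)|($c&$d))   # Maj(b,c,d)
          :         ($b^$c^$d);                 # Parity(b,c,d)
    $e = ($e + ($f&0xffffffff) + $x + $K[int($i/20)]
             + ((($a<<5)|($a>>27))&0xffffffff)) & 0xffffffff;  # e+=F+X[i]+K+rol(a,5)
    $b = (($b<<30)|($b>>2)) & 0xffffffff;       # b=rol(b,30)
    return ($e,$a,$b,$c,$d);    # becomes (a,b,c,d,e) of the next round,
}                               # mirroring the unshift(@V,pop(@V)) below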
  310. $code.=<<___;
  311. .text
  312. .extern OPENSSL_ia32cap_P
  313. .globl sha1_multi_block
  314. .type sha1_multi_block,\@function,3
  315. .align 32
  316. sha1_multi_block:
  317. .cfi_startproc
  318. mov OPENSSL_ia32cap_P+4(%rip),%rcx
  319. bt \$61,%rcx # check SHA bit
  320. jc _shaext_shortcut
  321. ___
  322. $code.=<<___ if ($avx);
  323. test \$`1<<28`,%ecx
  324. jnz _avx_shortcut
  325. ___
  326. $code.=<<___;
  327. mov %rsp,%rax
  328. .cfi_def_cfa_register %rax
  329. push %rbx
  330. .cfi_push %rbx
  331. push %rbp
  332. .cfi_push %rbp
  333. ___
  334. $code.=<<___ if ($win64);
  335. lea -0xa8(%rsp),%rsp
  336. movaps %xmm6,(%rsp)
  337. movaps %xmm7,0x10(%rsp)
  338. movaps %xmm8,0x20(%rsp)
  339. movaps %xmm9,0x30(%rsp)
  340. movaps %xmm10,-0x78(%rax)
  341. movaps %xmm11,-0x68(%rax)
  342. movaps %xmm12,-0x58(%rax)
  343. movaps %xmm13,-0x48(%rax)
  344. movaps %xmm14,-0x38(%rax)
  345. movaps %xmm15,-0x28(%rax)
  346. ___
  347. $code.=<<___;
  348. sub \$`$REG_SZ*18`,%rsp
  349. and \$-256,%rsp
  350. mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
  351. .cfi_cfa_expression %rsp+`$REG_SZ*17`,deref,+8
  352. .Lbody:
  353. lea K_XX_XX(%rip),$Tbl
  354. lea `$REG_SZ*16`(%rsp),%rbx
  355. .Loop_grande:
  356. mov $num,`$REG_SZ*17+8`(%rsp) # original $num
  357. xor $num,$num
  358. ___
  359. for($i=0;$i<4;$i++) {
  360. $code.=<<___;
  361. mov `16*$i+0`($inp),@ptr[$i] # input pointer
  362. mov `16*$i+8`($inp),%ecx # number of blocks
  363. cmp $num,%ecx
  364. cmovg %ecx,$num # find maximum
  365. test %ecx,%ecx
  366. mov %ecx,`4*$i`(%rbx) # initialize counters
  367. cmovle $Tbl,@ptr[$i] # cancel input
  368. ___
  369. }
  370. $code.=<<___;
  371. test $num,$num
  372. jz .Ldone
  373. movdqu 0x00($ctx),$A # load context
  374. lea 128(%rsp),%rax
  375. movdqu 0x20($ctx),$B
  376. movdqu 0x40($ctx),$C
  377. movdqu 0x60($ctx),$D
  378. movdqu 0x80($ctx),$E
  379. movdqa 0x60($Tbl),$tx # pbswap_mask
  380. movdqa -0x20($Tbl),$K # K_00_19
  381. jmp .Loop
  382. .align 32
  383. .Loop:
  384. ___
  385. for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
  386. $code.=" movdqa 0x00($Tbl),$K\n"; # K_20_39
  387. for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
  388. $code.=" movdqa 0x20($Tbl),$K\n"; # K_40_59
  389. for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
  390. $code.=" movdqa 0x40($Tbl),$K\n"; # K_60_79
  391. for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
  392. $code.=<<___;
  393. movdqa (%rbx),@Xi[0] # pull counters
  394. mov \$1,%ecx
  395. cmp 4*0(%rbx),%ecx # examine counters
  396. pxor $t2,$t2
  397. cmovge $Tbl,@ptr[0] # cancel input
  398. cmp 4*1(%rbx),%ecx
  399. movdqa @Xi[0],@Xi[1]
  400. cmovge $Tbl,@ptr[1]
  401. cmp 4*2(%rbx),%ecx
  402. pcmpgtd $t2,@Xi[1] # mask value
  403. cmovge $Tbl,@ptr[2]
  404. cmp 4*3(%rbx),%ecx
  405. paddd @Xi[1],@Xi[0] # counters--
  406. cmovge $Tbl,@ptr[3]
  407. movdqu 0x00($ctx),$t0
  408. pand @Xi[1],$A
  409. movdqu 0x20($ctx),$t1
  410. pand @Xi[1],$B
  411. paddd $t0,$A
  412. movdqu 0x40($ctx),$t2
  413. pand @Xi[1],$C
  414. paddd $t1,$B
  415. movdqu 0x60($ctx),$t3
  416. pand @Xi[1],$D
  417. paddd $t2,$C
  418. movdqu 0x80($ctx),$tx
  419. pand @Xi[1],$E
  420. movdqu $A,0x00($ctx)
  421. paddd $t3,$D
  422. movdqu $B,0x20($ctx)
  423. paddd $tx,$E
  424. movdqu $C,0x40($ctx)
  425. movdqu $D,0x60($ctx)
  426. movdqu $E,0x80($ctx)
  427. movdqa @Xi[0],(%rbx) # save counters
  428. movdqa 0x60($Tbl),$tx # pbswap_mask
  429. movdqa -0x20($Tbl),$K # K_00_19
  430. dec $num
  431. jnz .Loop
  432. mov `$REG_SZ*17+8`(%rsp),$num
  433. lea $REG_SZ($ctx),$ctx
  434. lea `16*$REG_SZ/4`($inp),$inp
  435. dec $num
  436. jnz .Loop_grande
  437. .Ldone:
  438. mov `$REG_SZ*17`(%rsp),%rax # original %rsp
  439. .cfi_def_cfa %rax,8
  440. ___
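# The tail of the .Loop body above retires finished lanes without taking a
# per-lane branch: once a lane is down to its last block, cmovge redirects
# its input pointer at the constant table (harmless dummy reads), while the
# pcmpgtd-derived mask decrements only the still-active counters and zeroes
# the working state of already-finished lanes, so for those lanes the stored
# context simply passes through unchanged. A scalar model of that
# bookkeeping (illustrative only; the helper name is made up):
sub _mb_retire_lanes_model {
    my ($counters,$ptrs,$dummy) = @_;           # array refs + dummy pointer
    my @mask;
    for my $lane (0..$#$counters) {
        # cmovge $Tbl,@ptr[$lane]: redirect input once <=1 block remains
        $ptrs->[$lane] = $dummy if ($counters->[$lane] <= 1);
        # pcmpgtd: all-ones for lanes that still had blocks to process
        $mask[$lane] = $counters->[$lane] > 0 ? 0xffffffff : 0;
        # paddd of the -1 mask: decrement only the active counters
        $counters->[$lane]-- if ($mask[$lane]);
    }
    return @mask;       # gates ctx[lane] += masked working state
}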
  441. $code.=<<___ if ($win64);
  442. movaps -0xb8(%rax),%xmm6
  443. movaps -0xa8(%rax),%xmm7
  444. movaps -0x98(%rax),%xmm8
  445. movaps -0x88(%rax),%xmm9
  446. movaps -0x78(%rax),%xmm10
  447. movaps -0x68(%rax),%xmm11
  448. movaps -0x58(%rax),%xmm12
  449. movaps -0x48(%rax),%xmm13
  450. movaps -0x38(%rax),%xmm14
  451. movaps -0x28(%rax),%xmm15
  452. ___
  453. $code.=<<___;
  454. mov -16(%rax),%rbp
  455. .cfi_restore %rbp
  456. mov -8(%rax),%rbx
  457. .cfi_restore %rbx
  458. lea (%rax),%rsp
  459. .cfi_def_cfa_register %rsp
  460. .Lepilogue:
  461. ret
  462. .cfi_endproc
  463. .size sha1_multi_block,.-sha1_multi_block
  464. ___
  465. {{{
  466. my ($ABCD0,$E0,$E0_,$BSWAP,$ABCD1,$E1,$E1_)=map("%xmm$_",(0..3,8..10));
  467. my @MSG0=map("%xmm$_",(4..7));
  468. my @MSG1=map("%xmm$_",(11..14));
  469. $code.=<<___;
  470. .type sha1_multi_block_shaext,\@function,3
  471. .align 32
  472. sha1_multi_block_shaext:
  473. .cfi_startproc
  474. _shaext_shortcut:
  475. mov %rsp,%rax
  476. .cfi_def_cfa_register %rax
  477. push %rbx
  478. .cfi_push %rbx
  479. push %rbp
  480. .cfi_push %rbp
  481. ___
  482. $code.=<<___ if ($win64);
  483. lea -0xa8(%rsp),%rsp
  484. movaps %xmm6,(%rsp)
  485. movaps %xmm7,0x10(%rsp)
  486. movaps %xmm8,0x20(%rsp)
  487. movaps %xmm9,0x30(%rsp)
  488. movaps %xmm10,-0x78(%rax)
  489. movaps %xmm11,-0x68(%rax)
  490. movaps %xmm12,-0x58(%rax)
  491. movaps %xmm13,-0x48(%rax)
  492. movaps %xmm14,-0x38(%rax)
  493. movaps %xmm15,-0x28(%rax)
  494. ___
  495. $code.=<<___;
  496. sub \$`$REG_SZ*18`,%rsp
  497. shl \$1,$num # we process pair at a time
  498. and \$-256,%rsp
  499. lea 0x40($ctx),$ctx # size optimization
  500. mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
.cfi_cfa_expression %rsp+`$REG_SZ*17`,deref,+8
  501. .Lbody_shaext:
  502. lea `$REG_SZ*16`(%rsp),%rbx
  503. movdqa K_XX_XX+0x80(%rip),$BSWAP # byte-n-word swap
  504. .Loop_grande_shaext:
  505. mov $num,`$REG_SZ*17+8`(%rsp) # original $num
  506. xor $num,$num
  507. ___
  508. for($i=0;$i<2;$i++) {
  509. $code.=<<___;
  510. mov `16*$i+0`($inp),@ptr[$i] # input pointer
  511. mov `16*$i+8`($inp),%ecx # number of blocks
  512. cmp $num,%ecx
  513. cmovg %ecx,$num # find maximum
  514. test %ecx,%ecx
  515. mov %ecx,`4*$i`(%rbx) # initialize counters
  516. cmovle %rsp,@ptr[$i] # cancel input
  517. ___
  518. }
  519. $code.=<<___;
  520. test $num,$num
  521. jz .Ldone_shaext
  522. movq 0x00-0x40($ctx),$ABCD0 # a1.a0
  523. movq 0x20-0x40($ctx),@MSG0[0]# b1.b0
  524. movq 0x40-0x40($ctx),@MSG0[1]# c1.c0
  525. movq 0x60-0x40($ctx),@MSG0[2]# d1.d0
  526. movq 0x80-0x40($ctx),@MSG0[3]# e1.e0
  527. punpckldq @MSG0[0],$ABCD0 # b1.a1.b0.a0
  528. punpckldq @MSG0[2],@MSG0[1] # d1.c1.d0.c0
  529. movdqa $ABCD0,$ABCD1
  530. punpcklqdq @MSG0[1],$ABCD0 # d0.c0.b0.a0
  531. punpckhqdq @MSG0[1],$ABCD1 # d1.c1.b1.a1
  532. pshufd \$0b00111111,@MSG0[3],$E0
  533. pshufd \$0b01111111,@MSG0[3],$E1
  534. pshufd \$0b00011011,$ABCD0,$ABCD0
  535. pshufd \$0b00011011,$ABCD1,$ABCD1
  536. jmp .Loop_shaext
  537. .align 32
  538. .Loop_shaext:
  539. movdqu 0x00(@ptr[0]),@MSG0[0]
  540. movdqu 0x00(@ptr[1]),@MSG1[0]
  541. movdqu 0x10(@ptr[0]),@MSG0[1]
  542. movdqu 0x10(@ptr[1]),@MSG1[1]
  543. movdqu 0x20(@ptr[0]),@MSG0[2]
  544. pshufb $BSWAP,@MSG0[0]
  545. movdqu 0x20(@ptr[1]),@MSG1[2]
  546. pshufb $BSWAP,@MSG1[0]
  547. movdqu 0x30(@ptr[0]),@MSG0[3]
  548. lea 0x40(@ptr[0]),@ptr[0]
  549. pshufb $BSWAP,@MSG0[1]
  550. movdqu 0x30(@ptr[1]),@MSG1[3]
  551. lea 0x40(@ptr[1]),@ptr[1]
  552. pshufb $BSWAP,@MSG1[1]
  553. movdqa $E0,0x50(%rsp) # offload
  554. paddd @MSG0[0],$E0
  555. movdqa $E1,0x70(%rsp)
  556. paddd @MSG1[0],$E1
  557. movdqa $ABCD0,0x40(%rsp) # offload
  558. movdqa $ABCD0,$E0_
  559. movdqa $ABCD1,0x60(%rsp)
  560. movdqa $ABCD1,$E1_
  561. sha1rnds4 \$0,$E0,$ABCD0 # 0-3
  562. sha1nexte @MSG0[1],$E0_
  563. sha1rnds4 \$0,$E1,$ABCD1 # 0-3
  564. sha1nexte @MSG1[1],$E1_
  565. pshufb $BSWAP,@MSG0[2]
  566. prefetcht0 127(@ptr[0])
  567. sha1msg1 @MSG0[1],@MSG0[0]
  568. pshufb $BSWAP,@MSG1[2]
  569. prefetcht0 127(@ptr[1])
  570. sha1msg1 @MSG1[1],@MSG1[0]
  571. pshufb $BSWAP,@MSG0[3]
  572. movdqa $ABCD0,$E0
  573. pshufb $BSWAP,@MSG1[3]
  574. movdqa $ABCD1,$E1
  575. sha1rnds4 \$0,$E0_,$ABCD0 # 4-7
  576. sha1nexte @MSG0[2],$E0
  577. sha1rnds4 \$0,$E1_,$ABCD1 # 4-7
  578. sha1nexte @MSG1[2],$E1
  579. pxor @MSG0[2],@MSG0[0]
  580. sha1msg1 @MSG0[2],@MSG0[1]
  581. pxor @MSG1[2],@MSG1[0]
  582. sha1msg1 @MSG1[2],@MSG1[1]
  583. ___
  584. for($i=2;$i<20-4;$i++) {
  585. $code.=<<___;
  586. movdqa $ABCD0,$E0_
  587. movdqa $ABCD1,$E1_
  588. sha1rnds4 \$`int($i/5)`,$E0,$ABCD0 # 8-11
  589. sha1nexte @MSG0[3],$E0_
  590. sha1rnds4 \$`int($i/5)`,$E1,$ABCD1 # 8-11
  591. sha1nexte @MSG1[3],$E1_
  592. sha1msg2 @MSG0[3],@MSG0[0]
  593. sha1msg2 @MSG1[3],@MSG1[0]
  594. pxor @MSG0[3],@MSG0[1]
  595. sha1msg1 @MSG0[3],@MSG0[2]
  596. pxor @MSG1[3],@MSG1[1]
  597. sha1msg1 @MSG1[3],@MSG1[2]
  598. ___
  599. ($E0,$E0_)=($E0_,$E0); ($E1,$E1_)=($E1_,$E1);
  600. push(@MSG0,shift(@MSG0)); push(@MSG1,shift(@MSG1));
  601. }
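# int($i/5) selects the sha1rnds4 round-constant group: each loop index
# emits four rounds per stream, so the immediate is 0 for $i=2..4 (rounds
# 8-19, rounds 0-7 having been emitted above), 1 for $i=5..9 (20-39), 2 for
# $i=10..14 (40-59) and 3 for $i=15 (60-63); rounds 64-79 follow explicitly.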
  602. $code.=<<___;
  603. movdqa $ABCD0,$E0_
  604. movdqa $ABCD1,$E1_
  605. sha1rnds4 \$3,$E0,$ABCD0 # 64-67
  606. sha1nexte @MSG0[3],$E0_
  607. sha1rnds4 \$3,$E1,$ABCD1 # 64-67
  608. sha1nexte @MSG1[3],$E1_
  609. sha1msg2 @MSG0[3],@MSG0[0]
  610. sha1msg2 @MSG1[3],@MSG1[0]
  611. pxor @MSG0[3],@MSG0[1]
  612. pxor @MSG1[3],@MSG1[1]
  613. mov \$1,%ecx
  614. pxor @MSG0[2],@MSG0[2] # zero
  615. cmp 4*0(%rbx),%ecx # examine counters
  616. cmovge %rsp,@ptr[0] # cancel input
  617. movdqa $ABCD0,$E0
  618. movdqa $ABCD1,$E1
  619. sha1rnds4 \$3,$E0_,$ABCD0 # 68-71
  620. sha1nexte @MSG0[0],$E0
  621. sha1rnds4 \$3,$E1_,$ABCD1 # 68-71
  622. sha1nexte @MSG1[0],$E1
  623. sha1msg2 @MSG0[0],@MSG0[1]
  624. sha1msg2 @MSG1[0],@MSG1[1]
  625. cmp 4*1(%rbx),%ecx
  626. cmovge %rsp,@ptr[1]
  627. movq (%rbx),@MSG0[0] # pull counters
  628. movdqa $ABCD0,$E0_
  629. movdqa $ABCD1,$E1_
  630. sha1rnds4 \$3,$E0,$ABCD0 # 72-75
  631. sha1nexte @MSG0[1],$E0_
  632. sha1rnds4 \$3,$E1,$ABCD1 # 72-75
  633. sha1nexte @MSG1[1],$E1_
  634. pshufd \$0x00,@MSG0[0],@MSG1[2]
  635. pshufd \$0x55,@MSG0[0],@MSG1[3]
  636. movdqa @MSG0[0],@MSG0[1]
  637. pcmpgtd @MSG0[2],@MSG1[2]
  638. pcmpgtd @MSG0[2],@MSG1[3]
  639. movdqa $ABCD0,$E0
  640. movdqa $ABCD1,$E1
  641. sha1rnds4 \$3,$E0_,$ABCD0 # 76-79
  642. sha1nexte $MSG0[2],$E0
  643. sha1rnds4 \$3,$E1_,$ABCD1 # 76-79
  644. sha1nexte $MSG0[2],$E1
  645. pcmpgtd @MSG0[2],@MSG0[1] # counter mask
  646. pand @MSG1[2],$ABCD0
  647. pand @MSG1[2],$E0
  648. pand @MSG1[3],$ABCD1
  649. pand @MSG1[3],$E1
  650. paddd @MSG0[1],@MSG0[0] # counters--
  651. paddd 0x40(%rsp),$ABCD0
  652. paddd 0x50(%rsp),$E0
  653. paddd 0x60(%rsp),$ABCD1
  654. paddd 0x70(%rsp),$E1
  655. movq @MSG0[0],(%rbx) # save counters
  656. dec $num
  657. jnz .Loop_shaext
  658. mov `$REG_SZ*17+8`(%rsp),$num
  659. pshufd \$0b00011011,$ABCD0,$ABCD0
  660. pshufd \$0b00011011,$ABCD1,$ABCD1
  661. movdqa $ABCD0,@MSG0[0]
  662. punpckldq $ABCD1,$ABCD0 # b1.b0.a1.a0
  663. punpckhdq $ABCD1,@MSG0[0] # d1.d0.c1.c0
  664. punpckhdq $E1,$E0 # e1.e0.xx.xx
  665. movq $ABCD0,0x00-0x40($ctx) # a1.a0
  666. psrldq \$8,$ABCD0
  667. movq @MSG0[0],0x40-0x40($ctx)# c1.c0
  668. psrldq \$8,@MSG0[0]
  669. movq $ABCD0,0x20-0x40($ctx) # b1.b0
  670. psrldq \$8,$E0
  671. movq @MSG0[0],0x60-0x40($ctx)# d1.d0
  672. movq $E0,0x80-0x40($ctx) # e1.e0
  673. lea `$REG_SZ/2`($ctx),$ctx
  674. lea `16*2`($inp),$inp
  675. dec $num
  676. jnz .Loop_grande_shaext
  677. .Ldone_shaext:
  678. #mov `$REG_SZ*17`(%rsp),%rax # original %rsp
  679. ___
  680. $code.=<<___ if ($win64);
  681. movaps -0xb8(%rax),%xmm6
  682. movaps -0xa8(%rax),%xmm7
  683. movaps -0x98(%rax),%xmm8
  684. movaps -0x88(%rax),%xmm9
  685. movaps -0x78(%rax),%xmm10
  686. movaps -0x68(%rax),%xmm11
  687. movaps -0x58(%rax),%xmm12
  688. movaps -0x48(%rax),%xmm13
  689. movaps -0x38(%rax),%xmm14
  690. movaps -0x28(%rax),%xmm15
  691. ___
  692. $code.=<<___;
  693. mov -16(%rax),%rbp
  694. .cfi_restore %rbp
  695. mov -8(%rax),%rbx
  696. .cfi_restore %rbx
  697. lea (%rax),%rsp
  698. .cfi_def_cfa_register %rsp
  699. .Lepilogue_shaext:
  700. ret
  701. .cfi_endproc
  702. .size sha1_multi_block_shaext,.-sha1_multi_block_shaext
  703. ___
  704. }}}
  705. if ($avx) {{{
  706. sub BODY_00_19_avx {
  707. my ($i,$a,$b,$c,$d,$e)=@_;
  708. my $j=$i+1;
  709. my $k=$i+2;
  710. my $vpack = $REG_SZ==16 ? "vpunpckldq" : "vinserti128";
  711. my $ptr_n = $REG_SZ==16 ? @ptr[1] : @ptr[4];
  712. $code.=<<___ if ($i==0 && $REG_SZ==16);
  713. vmovd (@ptr[0]),@Xi[0]
  714. lea `16*4`(@ptr[0]),@ptr[0]
  715. vmovd (@ptr[1]),@Xi[2] # borrow Xi[2]
  716. lea `16*4`(@ptr[1]),@ptr[1]
  717. vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
  718. lea `16*4`(@ptr[2]),@ptr[2]
  719. vpinsrd \$1,(@ptr[3]),@Xi[2],@Xi[2]
  720. lea `16*4`(@ptr[3]),@ptr[3]
  721. vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
  722. vpunpckldq @Xi[2],@Xi[0],@Xi[0]
  723. vmovd `4*$j-16*4`($ptr_n),$t3
  724. vpshufb $tx,@Xi[0],@Xi[0]
  725. ___
  726. $code.=<<___ if ($i<15 && $REG_SZ==16); # just load input
  727. vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
  728. vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t3,$t3
  729. ___
  730. $code.=<<___ if ($i==0 && $REG_SZ==32);
  731. vmovd (@ptr[0]),@Xi[0]
  732. lea `16*4`(@ptr[0]),@ptr[0]
  733. vmovd (@ptr[4]),@Xi[2] # borrow Xi[2]
  734. lea `16*4`(@ptr[4]),@ptr[4]
  735. vmovd (@ptr[1]),$t2
  736. lea `16*4`(@ptr[1]),@ptr[1]
  737. vmovd (@ptr[5]),$t1
  738. lea `16*4`(@ptr[5]),@ptr[5]
  739. vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
  740. lea `16*4`(@ptr[2]),@ptr[2]
  741. vpinsrd \$1,(@ptr[6]),@Xi[2],@Xi[2]
  742. lea `16*4`(@ptr[6]),@ptr[6]
  743. vpinsrd \$1,(@ptr[3]),$t2,$t2
  744. lea `16*4`(@ptr[3]),@ptr[3]
  745. vpunpckldq $t2,@Xi[0],@Xi[0]
  746. vpinsrd \$1,(@ptr[7]),$t1,$t1
  747. lea `16*4`(@ptr[7]),@ptr[7]
  748. vpunpckldq $t1,@Xi[2],@Xi[2]
  749. vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
  750. vinserti128 @Xi[2],@Xi[0],@Xi[0]
  751. vmovd `4*$j-16*4`($ptr_n),$t3
  752. vpshufb $tx,@Xi[0],@Xi[0]
  753. ___
  754. $code.=<<___ if ($i<15 && $REG_SZ==32); # just load input
  755. vmovd `4*$j-16*4`(@ptr[1]),$t2
  756. vmovd `4*$j-16*4`(@ptr[5]),$t1
  757. vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
  758. vpinsrd \$1,`4*$j-16*4`(@ptr[6]),$t3,$t3
  759. vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t2,$t2
  760. vpunpckldq $t2,@Xi[1],@Xi[1]
  761. vpinsrd \$1,`4*$j-16*4`(@ptr[7]),$t1,$t1
  762. vpunpckldq $t1,$t3,$t3
  763. ___
  764. $code.=<<___ if ($i<14);
  765. vpaddd $K,$e,$e # e+=K_00_19
  766. vpslld \$5,$a,$t2
  767. vpandn $d,$b,$t1
  768. vpand $c,$b,$t0
  769. vmovdqa @Xi[0],`&Xi_off($i)`
  770. vpaddd @Xi[0],$e,$e # e+=X[i]
  771. $vpack $t3,@Xi[1],@Xi[1]
  772. vpsrld \$27,$a,$t3
  773. vpxor $t1,$t0,$t0 # Ch(b,c,d)
  774. vmovd `4*$k-16*4`(@ptr[0]),@Xi[2]
  775. vpslld \$30,$b,$t1
  776. vpor $t3,$t2,$t2 # rol(a,5)
  777. vmovd `4*$k-16*4`($ptr_n),$t3
  778. vpaddd $t0,$e,$e # e+=Ch(b,c,d)
  779. vpsrld \$2,$b,$b
  780. vpaddd $t2,$e,$e # e+=rol(a,5)
  781. vpshufb $tx,@Xi[1],@Xi[1]
  782. vpor $t1,$b,$b # b=rol(b,30)
  783. ___
  784. $code.=<<___ if ($i==14);
  785. vpaddd $K,$e,$e # e+=K_00_19
  786. prefetcht0 63(@ptr[0])
  787. vpslld \$5,$a,$t2
  788. vpandn $d,$b,$t1
  789. vpand $c,$b,$t0
  790. vmovdqa @Xi[0],`&Xi_off($i)`
  791. vpaddd @Xi[0],$e,$e # e+=X[i]
  792. $vpack $t3,@Xi[1],@Xi[1]
  793. vpsrld \$27,$a,$t3
  794. prefetcht0 63(@ptr[1])
  795. vpxor $t1,$t0,$t0 # Ch(b,c,d)
  796. vpslld \$30,$b,$t1
  797. vpor $t3,$t2,$t2 # rol(a,5)
  798. prefetcht0 63(@ptr[2])
  799. vpaddd $t0,$e,$e # e+=Ch(b,c,d)
  800. vpsrld \$2,$b,$b
  801. vpaddd $t2,$e,$e # e+=rol(a,5)
  802. prefetcht0 63(@ptr[3])
  803. vpshufb $tx,@Xi[1],@Xi[1]
  804. vpor $t1,$b,$b # b=rol(b,30)
  805. ___
  806. $code.=<<___ if ($i>=13 && $i<15);
  807. vmovdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
  808. ___
  809. $code.=<<___ if ($i>=15); # apply Xupdate
  810. vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
  811. vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  812. vpaddd $K,$e,$e # e+=K_00_19
  813. vpslld \$5,$a,$t2
  814. vpandn $d,$b,$t1
  815. `"prefetcht0 63(@ptr[4])" if ($i==15 && $REG_SZ==32)`
  816. vpand $c,$b,$t0
  817. vmovdqa @Xi[0],`&Xi_off($i)`
  818. vpaddd @Xi[0],$e,$e # e+=X[i]
  819. vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
  820. vpsrld \$27,$a,$t3
  821. vpxor $t1,$t0,$t0 # Ch(b,c,d)
  822. vpxor @Xi[3],@Xi[1],@Xi[1]
  823. `"prefetcht0 63(@ptr[5])" if ($i==15 && $REG_SZ==32)`
  824. vpslld \$30,$b,$t1
  825. vpor $t3,$t2,$t2 # rol(a,5)
  826. vpaddd $t0,$e,$e # e+=Ch(b,c,d)
  827. `"prefetcht0 63(@ptr[6])" if ($i==15 && $REG_SZ==32)`
  828. vpsrld \$31,@Xi[1],$tx
  829. vpaddd @Xi[1],@Xi[1],@Xi[1]
  830. vpsrld \$2,$b,$b
  831. `"prefetcht0 63(@ptr[7])" if ($i==15 && $REG_SZ==32)`
  832. vpaddd $t2,$e,$e # e+=rol(a,5)
  833. vpor $tx,@Xi[1],@Xi[1] # rol \$1,@Xi[1]
  834. vpor $t1,$b,$b # b=rol(b,30)
  835. ___
  836. push(@Xi,shift(@Xi));
  837. }
  838. sub BODY_20_39_avx {
  839. my ($i,$a,$b,$c,$d,$e)=@_;
  840. my $j=$i+1;
  841. $code.=<<___ if ($i<79);
  842. vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
  843. vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  844. vpslld \$5,$a,$t2
  845. vpaddd $K,$e,$e # e+=K_20_39
  846. vpxor $b,$d,$t0
  847. ___
  848. $code.=<<___ if ($i<72);
  849. vmovdqa @Xi[0],`&Xi_off($i)`
  850. ___
  851. $code.=<<___ if ($i<79);
  852. vpaddd @Xi[0],$e,$e # e+=X[i]
  853. vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
  854. vpsrld \$27,$a,$t3
  855. vpxor $c,$t0,$t0 # Parity(b,c,d)
  856. vpxor @Xi[3],@Xi[1],@Xi[1]
  857. vpslld \$30,$b,$t1
  858. vpor $t3,$t2,$t2 # rol(a,5)
  859. vpaddd $t0,$e,$e # e+=Parity(b,c,d)
  860. vpsrld \$31,@Xi[1],$tx
  861. vpaddd @Xi[1],@Xi[1],@Xi[1]
  862. vpsrld \$2,$b,$b
  863. vpaddd $t2,$e,$e # e+=rol(a,5)
  864. vpor $tx,@Xi[1],@Xi[1] # rol(@Xi[1],1)
  865. vpor $t1,$b,$b # b=rol(b,30)
  866. ___
  867. $code.=<<___ if ($i==79);
  868. vpslld \$5,$a,$t2
  869. vpaddd $K,$e,$e # e+=K_20_39
  870. vpxor $b,$d,$t0
  871. vpsrld \$27,$a,$t3
  872. vpaddd @Xi[0],$e,$e # e+=X[i]
  873. vpxor $c,$t0,$t0 # Parity(b,c,d)
  874. vpslld \$30,$b,$t1
  875. vpor $t3,$t2,$t2 # rol(a,5)
  876. vpaddd $t0,$e,$e # e+=Parity(b,c,d)
  877. vpsrld \$2,$b,$b
  878. vpaddd $t2,$e,$e # e+=rol(a,5)
  879. vpor $t1,$b,$b # b=rol(b,30)
  880. ___
  881. push(@Xi,shift(@Xi));
  882. }
  883. sub BODY_40_59_avx {
  884. my ($i,$a,$b,$c,$d,$e)=@_;
  885. my $j=$i+1;
  886. $code.=<<___;
  887. vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
  888. vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
  889. vpaddd $K,$e,$e # e+=K_40_59
  890. vpslld \$5,$a,$t2
  891. vpand $c,$d,$t1
  892. vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
  893. vpaddd $t1,$e,$e
  894. vpsrld \$27,$a,$t3
  895. vpxor $c,$d,$t0
  896. vpxor @Xi[3],@Xi[1],@Xi[1]
  897. vmovdqu @Xi[0],`&Xi_off($i)`
  898. vpaddd @Xi[0],$e,$e # e+=X[i]
  899. vpor $t3,$t2,$t2 # rol(a,5)
  900. vpsrld \$31,@Xi[1],$tx
  901. vpand $b,$t0,$t0
  902. vpaddd @Xi[1],@Xi[1],@Xi[1]
  903. vpslld \$30,$b,$t1
  904. vpaddd $t0,$e,$e # e+=Maj(b,d,c)
  905. vpsrld \$2,$b,$b
  906. vpaddd $t2,$e,$e # e+=rol(a,5)
  907. vpor $tx,@Xi[1],@Xi[1] # rol(@X[1],1)
  908. vpor $t1,$b,$b # b=rol(b,30)
  909. ___
  910. push(@Xi,shift(@Xi));
  911. }
  912. $code.=<<___;
  913. .type sha1_multi_block_avx,\@function,3
  914. .align 32
  915. sha1_multi_block_avx:
  916. .cfi_startproc
  917. _avx_shortcut:
  918. ___
  919. $code.=<<___ if ($avx>1);
  920. shr \$32,%rcx
  921. cmp \$2,$num
  922. jb .Lavx
  923. test \$`1<<5`,%ecx
  924. jnz _avx2_shortcut
  925. jmp .Lavx
  926. .align 32
  927. .Lavx:
  928. ___
  929. $code.=<<___;
  930. mov %rsp,%rax
  931. .cfi_def_cfa_register %rax
  932. push %rbx
  933. .cfi_push %rbx
  934. push %rbp
  935. .cfi_push %rbp
  936. ___
  937. $code.=<<___ if ($win64);
  938. lea -0xa8(%rsp),%rsp
  939. movaps %xmm6,(%rsp)
  940. movaps %xmm7,0x10(%rsp)
  941. movaps %xmm8,0x20(%rsp)
  942. movaps %xmm9,0x30(%rsp)
  943. movaps %xmm10,-0x78(%rax)
  944. movaps %xmm11,-0x68(%rax)
  945. movaps %xmm12,-0x58(%rax)
  946. movaps %xmm13,-0x48(%rax)
  947. movaps %xmm14,-0x38(%rax)
  948. movaps %xmm15,-0x28(%rax)
  949. ___
  950. $code.=<<___;
  951. sub \$`$REG_SZ*18`, %rsp
  952. and \$-256,%rsp
  953. mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
  954. .cfi_cfa_expression %rsp+`$REG_SZ*17`,deref,+8
  955. .Lbody_avx:
  956. lea K_XX_XX(%rip),$Tbl
  957. lea `$REG_SZ*16`(%rsp),%rbx
  958. vzeroupper
  959. .Loop_grande_avx:
  960. mov $num,`$REG_SZ*17+8`(%rsp) # original $num
  961. xor $num,$num
  962. ___
  963. for($i=0;$i<4;$i++) {
  964. $code.=<<___;
  965. mov `16*$i+0`($inp),@ptr[$i] # input pointer
  966. mov `16*$i+8`($inp),%ecx # number of blocks
  967. cmp $num,%ecx
  968. cmovg %ecx,$num # find maximum
  969. test %ecx,%ecx
  970. mov %ecx,`4*$i`(%rbx) # initialize counters
  971. cmovle $Tbl,@ptr[$i] # cancel input
  972. ___
  973. }
  974. $code.=<<___;
  975. test $num,$num
  976. jz .Ldone_avx
  977. vmovdqu 0x00($ctx),$A # load context
  978. lea 128(%rsp),%rax
  979. vmovdqu 0x20($ctx),$B
  980. vmovdqu 0x40($ctx),$C
  981. vmovdqu 0x60($ctx),$D
  982. vmovdqu 0x80($ctx),$E
  983. vmovdqu 0x60($Tbl),$tx # pbswap_mask
  984. jmp .Loop_avx
  985. .align 32
  986. .Loop_avx:
  987. ___
  988. $code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
  989. for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
  990. $code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
  991. for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
  992. $code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
  993. for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
  994. $code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
  995. for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
  996. $code.=<<___;
  997. mov \$1,%ecx
  998. ___
  999. for($i=0;$i<4;$i++) {
  1000. $code.=<<___;
  1001. cmp `4*$i`(%rbx),%ecx # examine counters
  1002. cmovge $Tbl,@ptr[$i] # cancel input
  1003. ___
  1004. }
  1005. $code.=<<___;
  1006. vmovdqu (%rbx),$t0 # pull counters
  1007. vpxor $t2,$t2,$t2
  1008. vmovdqa $t0,$t1
  1009. vpcmpgtd $t2,$t1,$t1 # mask value
  1010. vpaddd $t1,$t0,$t0 # counters--
  1011. vpand $t1,$A,$A
  1012. vpand $t1,$B,$B
  1013. vpaddd 0x00($ctx),$A,$A
  1014. vpand $t1,$C,$C
  1015. vpaddd 0x20($ctx),$B,$B
  1016. vpand $t1,$D,$D
  1017. vpaddd 0x40($ctx),$C,$C
  1018. vpand $t1,$E,$E
  1019. vpaddd 0x60($ctx),$D,$D
  1020. vpaddd 0x80($ctx),$E,$E
  1021. vmovdqu $A,0x00($ctx)
  1022. vmovdqu $B,0x20($ctx)
  1023. vmovdqu $C,0x40($ctx)
  1024. vmovdqu $D,0x60($ctx)
  1025. vmovdqu $E,0x80($ctx)
  1026. vmovdqu $t0,(%rbx) # save counters
  1027. vmovdqu 0x60($Tbl),$tx # pbswap_mask
  1028. dec $num
  1029. jnz .Loop_avx
  1030. mov `$REG_SZ*17+8`(%rsp),$num
  1031. lea $REG_SZ($ctx),$ctx
  1032. lea `16*$REG_SZ/4`($inp),$inp
  1033. dec $num
  1034. jnz .Loop_grande_avx
  1035. .Ldone_avx:
  1036. mov `$REG_SZ*17`(%rsp),%rax # original %rsp
  1037. .cfi_def_cfa %rax,8
  1038. vzeroupper
  1039. ___
  1040. $code.=<<___ if ($win64);
  1041. movaps -0xb8(%rax),%xmm6
  1042. movaps -0xa8(%rax),%xmm7
  1043. movaps -0x98(%rax),%xmm8
  1044. movaps -0x88(%rax),%xmm9
  1045. movaps -0x78(%rax),%xmm10
  1046. movaps -0x68(%rax),%xmm11
  1047. movaps -0x58(%rax),%xmm12
  1048. movaps -0x48(%rax),%xmm13
  1049. movaps -0x38(%rax),%xmm14
  1050. movaps -0x28(%rax),%xmm15
  1051. ___
  1052. $code.=<<___;
  1053. mov -16(%rax),%rbp
  1054. .cfi_restore %rbp
  1055. mov -8(%rax),%rbx
  1056. .cfi_restore %rbx
  1057. lea (%rax),%rsp
  1058. .cfi_def_cfa_register %rsp
  1059. .Lepilogue_avx:
  1060. ret
  1061. .cfi_endproc
  1062. .size sha1_multi_block_avx,.-sha1_multi_block_avx
  1063. ___
  1064. if ($avx>1) {
  1065. $code =~ s/\`([^\`]*)\`/eval $1/gem;
  1066. $REG_SZ=32;
  1067. @ptr=map("%r$_",(12..15,8..11));
  1068. @V=($A,$B,$C,$D,$E)=map("%ymm$_",(0..4));
  1069. ($t0,$t1,$t2,$t3,$tx)=map("%ymm$_",(5..9));
  1070. @Xi=map("%ymm$_",(10..14));
  1071. $K="%ymm15";
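# The s/\`...\`/eval $1/gem pass above freezes all backticked expressions
# already emitted with $REG_SZ==16; from here on the same BODY_*_avx
# generators are re-instantiated with 32-byte registers, i.e. eight lanes
# per %ymm register, eight input pointers and a doubled stack frame.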
  1072. $code.=<<___;
  1073. .type sha1_multi_block_avx2,\@function,3
  1074. .align 32
  1075. sha1_multi_block_avx2:
  1076. .cfi_startproc
  1077. _avx2_shortcut:
  1078. mov %rsp,%rax
  1079. .cfi_def_cfa_register %rax
  1080. push %rbx
  1081. .cfi_push %rbx
  1082. push %rbp
  1083. .cfi_push %rbp
  1084. push %r12
  1085. .cfi_push %r12
  1086. push %r13
  1087. .cfi_push %r13
  1088. push %r14
  1089. .cfi_push %r14
  1090. push %r15
  1091. .cfi_push %r15
  1092. ___
  1093. $code.=<<___ if ($win64);
  1094. lea -0xa8(%rsp),%rsp
  1095. movaps %xmm6,(%rsp)
  1096. movaps %xmm7,0x10(%rsp)
  1097. movaps %xmm8,0x20(%rsp)
  1098. movaps %xmm9,0x30(%rsp)
  1099. movaps %xmm10,0x40(%rsp)
  1100. movaps %xmm11,0x50(%rsp)
  1101. movaps %xmm12,-0x78(%rax)
  1102. movaps %xmm13,-0x68(%rax)
  1103. movaps %xmm14,-0x58(%rax)
  1104. movaps %xmm15,-0x48(%rax)
  1105. ___
  1106. $code.=<<___;
  1107. sub \$`$REG_SZ*18`, %rsp
  1108. and \$-256,%rsp
  1109. mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
  1110. .cfi_cfa_expression %rsp+`$REG_SZ*17`,deref,+8
  1111. .Lbody_avx2:
  1112. lea K_XX_XX(%rip),$Tbl
  1113. shr \$1,$num
  1114. vzeroupper
  1115. .Loop_grande_avx2:
  1116. mov $num,`$REG_SZ*17+8`(%rsp) # original $num
  1117. xor $num,$num
  1118. lea `$REG_SZ*16`(%rsp),%rbx
  1119. ___
  1120. for($i=0;$i<8;$i++) {
  1121. $code.=<<___;
  1122. mov `16*$i+0`($inp),@ptr[$i] # input pointer
  1123. mov `16*$i+8`($inp),%ecx # number of blocks
  1124. cmp $num,%ecx
  1125. cmovg %ecx,$num # find maximum
  1126. test %ecx,%ecx
  1127. mov %ecx,`4*$i`(%rbx) # initialize counters
  1128. cmovle $Tbl,@ptr[$i] # cancel input
  1129. ___
  1130. }
  1131. $code.=<<___;
  1132. vmovdqu 0x00($ctx),$A # load context
  1133. lea 128(%rsp),%rax
  1134. vmovdqu 0x20($ctx),$B
  1135. lea 256+128(%rsp),%rbx
  1136. vmovdqu 0x40($ctx),$C
  1137. vmovdqu 0x60($ctx),$D
  1138. vmovdqu 0x80($ctx),$E
  1139. vmovdqu 0x60($Tbl),$tx # pbswap_mask
  1140. jmp .Loop_avx2
  1141. .align 32
  1142. .Loop_avx2:
  1143. ___
  1144. $code.=" vmovdqa -0x20($Tbl),$K\n"; # K_00_19
  1145. for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
  1146. $code.=" vmovdqa 0x00($Tbl),$K\n"; # K_20_39
  1147. for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
  1148. $code.=" vmovdqa 0x20($Tbl),$K\n"; # K_40_59
  1149. for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
  1150. $code.=" vmovdqa 0x40($Tbl),$K\n"; # K_60_79
  1151. for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
  1152. $code.=<<___;
  1153. mov \$1,%ecx
  1154. lea `$REG_SZ*16`(%rsp),%rbx
  1155. ___
  1156. for($i=0;$i<8;$i++) {
  1157. $code.=<<___;
  1158. cmp `4*$i`(%rbx),%ecx # examine counters
  1159. cmovge $Tbl,@ptr[$i] # cancel input
  1160. ___
  1161. }
  1162. $code.=<<___;
  1163. vmovdqu (%rbx),$t0 # pull counters
  1164. vpxor $t2,$t2,$t2
  1165. vmovdqa $t0,$t1
  1166. vpcmpgtd $t2,$t1,$t1 # mask value
  1167. vpaddd $t1,$t0,$t0 # counters--
  1168. vpand $t1,$A,$A
  1169. vpand $t1,$B,$B
  1170. vpaddd 0x00($ctx),$A,$A
  1171. vpand $t1,$C,$C
  1172. vpaddd 0x20($ctx),$B,$B
  1173. vpand $t1,$D,$D
  1174. vpaddd 0x40($ctx),$C,$C
  1175. vpand $t1,$E,$E
  1176. vpaddd 0x60($ctx),$D,$D
  1177. vpaddd 0x80($ctx),$E,$E
  1178. vmovdqu $A,0x00($ctx)
  1179. vmovdqu $B,0x20($ctx)
  1180. vmovdqu $C,0x40($ctx)
  1181. vmovdqu $D,0x60($ctx)
  1182. vmovdqu $E,0x80($ctx)
  1183. vmovdqu $t0,(%rbx) # save counters
  1184. lea 256+128(%rsp),%rbx
  1185. vmovdqu 0x60($Tbl),$tx # pbswap_mask
  1186. dec $num
  1187. jnz .Loop_avx2
  1188. #mov `$REG_SZ*17+8`(%rsp),$num
  1189. #lea $REG_SZ($ctx),$ctx
  1190. #lea `16*$REG_SZ/4`($inp),$inp
  1191. #dec $num
  1192. #jnz .Loop_grande_avx2
  1193. .Ldone_avx2:
  1194. mov `$REG_SZ*17`(%rsp),%rax # original %rsp
  1195. .cfi_def_cfa %rax,8
  1196. vzeroupper
  1197. ___
  1198. $code.=<<___ if ($win64);
  1199. movaps -0xd8(%rax),%xmm6
  1200. movaps -0xc8(%rax),%xmm7
  1201. movaps -0xb8(%rax),%xmm8
  1202. movaps -0xa8(%rax),%xmm9
  1203. movaps -0x98(%rax),%xmm10
  1204. movaps -0x88(%rax),%xmm11
  1205. movaps -0x78(%rax),%xmm12
  1206. movaps -0x68(%rax),%xmm13
  1207. movaps -0x58(%rax),%xmm14
  1208. movaps -0x48(%rax),%xmm15
  1209. ___
  1210. $code.=<<___;
  1211. mov -48(%rax),%r15
  1212. .cfi_restore %r15
  1213. mov -40(%rax),%r14
  1214. .cfi_restore %r14
  1215. mov -32(%rax),%r13
  1216. .cfi_restore %r13
  1217. mov -24(%rax),%r12
  1218. .cfi_restore %r12
  1219. mov -16(%rax),%rbp
  1220. .cfi_restore %rbp
  1221. mov -8(%rax),%rbx
  1222. .cfi_restore %rbx
  1223. lea (%rax),%rsp
  1224. .cfi_def_cfa_register %rsp
  1225. .Lepilogue_avx2:
  1226. ret
  1227. .cfi_endproc
  1228. .size sha1_multi_block_avx2,.-sha1_multi_block_avx2
  1229. ___
  1230. } }}}
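# Constant table. $Tbl points at K_XX_XX, so the code above addresses it as:
# -0x20 K_00_19, 0x00 K_20_39, 0x20 K_40_59, 0x40 K_60_79, 0x60 the per-dword
# byte-swap mask (pbswap), and +0x80 the byte-and-word swap mask used by the
# SHAEXT path. Each K constant and the pbswap mask are replicated to a full
# 32-byte row so the same table serves both %xmm and %ymm loads.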
  1231. $code.=<<___;
  1232. .align 256
  1233. .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
  1234. .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
  1235. K_XX_XX:
  1236. .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
  1237. .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
  1238. .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
  1239. .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
  1240. .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
  1241. .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
  1242. .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
  1243. .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
  1244. .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
  1245. .asciz "SHA1 multi-block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
  1246. ___
  1247. if ($win64) {
  1248. # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
  1249. # CONTEXT *context,DISPATCHER_CONTEXT *disp)
  1250. $rec="%rcx";
  1251. $frame="%rdx";
  1252. $context="%r8";
  1253. $disp="%r9";
  1254. $code.=<<___;
  1255. .extern __imp_RtlVirtualUnwind
  1256. .type se_handler,\@abi-omnipotent
  1257. .align 16
  1258. se_handler:
  1259. push %rsi
  1260. push %rdi
  1261. push %rbx
  1262. push %rbp
  1263. push %r12
  1264. push %r13
  1265. push %r14
  1266. push %r15
  1267. pushfq
  1268. sub \$64,%rsp
  1269. mov 120($context),%rax # pull context->Rax
  1270. mov 248($context),%rbx # pull context->Rip
  1271. mov 8($disp),%rsi # disp->ImageBase
  1272. mov 56($disp),%r11 # disp->HandlerData
  1273. mov 0(%r11),%r10d # HandlerData[0]
  1274. lea (%rsi,%r10),%r10 # end of prologue label
  1275. cmp %r10,%rbx # context->Rip<.Lbody
  1276. jb .Lin_prologue
  1277. mov 152($context),%rax # pull context->Rsp
  1278. mov 4(%r11),%r10d # HandlerData[1]
  1279. lea (%rsi,%r10),%r10 # epilogue label
  1280. cmp %r10,%rbx # context->Rip>=.Lepilogue
  1281. jae .Lin_prologue
  1282. mov `16*17`(%rax),%rax # pull saved stack pointer
  1283. mov -8(%rax),%rbx
  1284. mov -16(%rax),%rbp
  1285. mov %rbx,144($context) # restore context->Rbx
  1286. mov %rbp,160($context) # restore context->Rbp
  1287. lea -24-10*16(%rax),%rsi
  1288. lea 512($context),%rdi # &context.Xmm6
  1289. mov \$20,%ecx
  1290. .long 0xa548f3fc # cld; rep movsq
  1291. .Lin_prologue:
  1292. mov 8(%rax),%rdi
  1293. mov 16(%rax),%rsi
  1294. mov %rax,152($context) # restore context->Rsp
  1295. mov %rsi,168($context) # restore context->Rsi
  1296. mov %rdi,176($context) # restore context->Rdi
  1297. mov 40($disp),%rdi # disp->ContextRecord
  1298. mov $context,%rsi # context
  1299. mov \$154,%ecx # sizeof(CONTEXT)
  1300. .long 0xa548f3fc # cld; rep movsq
  1301. mov $disp,%rsi
  1302. xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
  1303. mov 8(%rsi),%rdx # arg2, disp->ImageBase
  1304. mov 0(%rsi),%r8 # arg3, disp->ControlPc
  1305. mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
  1306. mov 40(%rsi),%r10 # disp->ContextRecord
  1307. lea 56(%rsi),%r11 # &disp->HandlerData
  1308. lea 24(%rsi),%r12 # &disp->EstablisherFrame
  1309. mov %r10,32(%rsp) # arg5
  1310. mov %r11,40(%rsp) # arg6
  1311. mov %r12,48(%rsp) # arg7
  1312. mov %rcx,56(%rsp) # arg8, (NULL)
  1313. call *__imp_RtlVirtualUnwind(%rip)
  1314. mov \$1,%eax # ExceptionContinueSearch
  1315. add \$64,%rsp
  1316. popfq
  1317. pop %r15
  1318. pop %r14
  1319. pop %r13
  1320. pop %r12
  1321. pop %rbp
  1322. pop %rbx
  1323. pop %rdi
  1324. pop %rsi
  1325. ret
  1326. .size se_handler,.-se_handler
  1327. ___
  1328. $code.=<<___ if ($avx>1);
  1329. .type avx2_handler,\@abi-omnipotent
  1330. .align 16
  1331. avx2_handler:
  1332. push %rsi
  1333. push %rdi
  1334. push %rbx
  1335. push %rbp
  1336. push %r12
  1337. push %r13
  1338. push %r14
  1339. push %r15
  1340. pushfq
  1341. sub \$64,%rsp
  1342. mov 120($context),%rax # pull context->Rax
  1343. mov 248($context),%rbx # pull context->Rip
  1344. mov 8($disp),%rsi # disp->ImageBase
  1345. mov 56($disp),%r11 # disp->HandlerData
  1346. mov 0(%r11),%r10d # HandlerData[0]
  1347. lea (%rsi,%r10),%r10 # end of prologue label
  1348. cmp %r10,%rbx # context->Rip<body label
  1349. jb .Lin_prologue
  1350. mov 152($context),%rax # pull context->Rsp
  1351. mov 4(%r11),%r10d # HandlerData[1]
  1352. lea (%rsi,%r10),%r10 # epilogue label
  1353. cmp %r10,%rbx # context->Rip>=epilogue label
  1354. jae .Lin_prologue
  1355. mov `32*17`(%rax),%rax # pull saved stack pointer
  1356. mov -8(%rax),%rbx
  1357. mov -16(%rax),%rbp
  1358. mov -24(%rax),%r12
  1359. mov -32(%rax),%r13
  1360. mov -40(%rax),%r14
  1361. mov -48(%rax),%r15
  1362. mov %rbx,144($context) # restore context->Rbx
  1363. mov %rbp,160($context) # restore context->Rbp
  1364. mov %r12,216($context) # restore context->R12
  1365. mov %r13,224($context) # restore context->R13
  1366. mov %r14,232($context) # restore context->R14
  1367. mov %r15,240($context) # restore context->R15
  1368. lea -56-10*16(%rax),%rsi
  1369. lea 512($context),%rdi # &context.Xmm6
  1370. mov \$20,%ecx
  1371. .long 0xa548f3fc # cld; rep movsq
  1372. jmp .Lin_prologue
  1373. .size avx2_handler,.-avx2_handler
  1374. ___
  1375. $code.=<<___;
  1376. .section .pdata
  1377. .align 4
  1378. .rva .LSEH_begin_sha1_multi_block
  1379. .rva .LSEH_end_sha1_multi_block
  1380. .rva .LSEH_info_sha1_multi_block
  1381. .rva .LSEH_begin_sha1_multi_block_shaext
  1382. .rva .LSEH_end_sha1_multi_block_shaext
  1383. .rva .LSEH_info_sha1_multi_block_shaext
  1384. ___
  1385. $code.=<<___ if ($avx);
  1386. .rva .LSEH_begin_sha1_multi_block_avx
  1387. .rva .LSEH_end_sha1_multi_block_avx
  1388. .rva .LSEH_info_sha1_multi_block_avx
  1389. ___
  1390. $code.=<<___ if ($avx>1);
  1391. .rva .LSEH_begin_sha1_multi_block_avx2
  1392. .rva .LSEH_end_sha1_multi_block_avx2
  1393. .rva .LSEH_info_sha1_multi_block_avx2
  1394. ___
  1395. $code.=<<___;
  1396. .section .xdata
  1397. .align 8
  1398. .LSEH_info_sha1_multi_block:
  1399. .byte 9,0,0,0
  1400. .rva se_handler
  1401. .rva .Lbody,.Lepilogue # HandlerData[]
  1402. .LSEH_info_sha1_multi_block_shaext:
  1403. .byte 9,0,0,0
  1404. .rva se_handler
  1405. .rva .Lbody_shaext,.Lepilogue_shaext # HandlerData[]
  1406. ___
  1407. $code.=<<___ if ($avx);
  1408. .LSEH_info_sha1_multi_block_avx:
  1409. .byte 9,0,0,0
  1410. .rva se_handler
  1411. .rva .Lbody_avx,.Lepilogue_avx # HandlerData[]
  1412. ___
  1413. $code.=<<___ if ($avx>1);
  1414. .LSEH_info_sha1_multi_block_avx2:
  1415. .byte 9,0,0,0
  1416. .rva avx2_handler
  1417. .rva .Lbody_avx2,.Lepilogue_avx2 # HandlerData[]
  1418. ___
  1419. }
  1420. ####################################################################
  1421. sub rex {
  1422. local *opcode=shift;
  1423. my ($dst,$src)=@_;
  1424. my $rex=0;
  1425. $rex|=0x04 if ($dst>=8);
  1426. $rex|=0x01 if ($src>=8);
  1427. unshift @opcode,$rex|0x40 if ($rex);
  1428. }
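# rex() prepends a REX prefix (base 0x40) only when one of the registers is
# xmm8..xmm15: bit 0x04 is REX.R and extends the ModR/M reg field ($dst),
# bit 0x01 is REX.B and extends the ModR/M r/m field ($src).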
  1429. sub sha1rnds4 {
  1430. if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
  1431. my @opcode=(0x0f,0x3a,0xcc);
  1432. rex(\@opcode,$3,$2);
  1433. push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
  1434. my $c=$1;
  1435. push @opcode,$c=~/^0/?oct($c):$c;
  1436. return ".byte\t".join(',',@opcode);
  1437. } else {
  1438. return "sha1rnds4\t".@_[0];
  1439. }
  1440. }
  1441. sub sha1op38 {
  1442. my $instr = shift;
  1443. my %opcodelet = (
  1444. "sha1nexte" => 0xc8,
  1445. "sha1msg1" => 0xc9,
  1446. "sha1msg2" => 0xca );
  1447. if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
  1448. my @opcode=(0x0f,0x38);
  1449. rex(\@opcode,$2,$1);
  1450. push @opcode,$opcodelet{$instr};
  1451. push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
  1452. return ".byte\t".join(',',@opcode);
  1453. } else {
  1454. return $instr."\t".@_[0];
  1455. }
  1456. }
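# The two helpers above hand-assemble the SHA extension instructions for
# assemblers that do not know them. For example, "sha1rnds4 \$0,%xmm1,%xmm0"
# (neither register needing a REX prefix) is rewritten as
# ".byte 15,58,204,193,0", i.e. the 0F 3A CC opcode, ModR/M 0xc1 (xmm0 in
# reg, xmm1 in r/m) and the zero immediate; sha1nexte/sha1msg1/sha1msg2 are
# emitted the same way with the 0F 38 C8/C9/CA opcodes.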
  1457. foreach (split("\n",$code)) {
  1458. s/\`([^\`]*)\`/eval($1)/ge;
  1459. s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo or
  1460. s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo or
  1461. s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
  1462. s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
  1463. s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go or
  1464. s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
  1465. s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go or
  1466. s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
  1467. print $_,"\n";
  1468. }
  1469. close STDOUT;