#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes amount of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
# on P4, where it kills performance, nor Sandy Bridge, where folded
# loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#               gcc     icc     x86 asm(*)  SIMD    x86_64 asm(**)
# Pentium       46      57      40/38       -       -
# PIII          36      33      27/24       -       -
# P4            41      38      28          -       17.3
# AMD K8        27      25      19/15.5     -       14.9
# Core2         26      23      18/15.6     14.3    13.8
# Westmere      27      -       19/15.7     13.4    12.3
# Sandy Bridge  25      -       15.9        12.4    11.6
# Ivy Bridge    24      -       15.0        11.4    10.3
# Haswell       22      -       13.9        9.46    7.80
# Skylake       20      -       14.9        9.50    7.70
# Bulldozer     36      -       27/22       17.0    13.6
# VIA Nano      36      -       25/22       16.8    16.5
# Atom          50      -       30/25       21.9    18.9
# Silvermont    40      -       34/31       22.9    20.6
# Goldmont      29      -       20          16.3(***)
#
# (*)   numbers after slash are for unrolled loop, where applicable;
# (**)  x86_64 assembly performance is presented for reference
#       purposes, results are best-available;
# (***) SHAEXT result is 4.1, strangely enough better than 64-bit one;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
        =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
    $avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
    $avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
    `ml 2>&1` =~ /Version ([0-9]+)\./) {
    $avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
    $avx = ($2>=3.0) + ($2>3.0);
}
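# $avx: 0 = AVX unavailable or assembler too old, 1 = AVX code path
# enabled, 2 = assembler also new enough for the BMI (rorx/andn)
# variant taken at the "AVX_BMI" label below.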

$shaext=$xmm;           ### set to zero if compiling for 1.0.1

$unroll_after = 64*4;   # If pre-evicted from L1P cache first spin of
                        # fully unrolled loop was measured to run about
                        # 3-4x slower. If slowdown coefficient is N and
                        # unrolled loop is m times faster, then you break
                        # even at (N-1)/(m-1) blocks. Then it needs to be
                        # adjusted for probability of code being evicted,
                        # code size/cache size=1/4. Typical m is 1.15...
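# A rough worked example of that break-even rule, assuming N~4 and
# m~1.15 as described above: (N-1)/(m-1) = 3/0.15 = 20 blocks to
# amortize one cold spin; weighting by the ~1/4 eviction probability
# gives ~5 blocks, i.e. ~320 bytes, which is roughly where the
# 64*4 = 256-byte threshold sits.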

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

sub BODY_16_63() {
    &mov ($T,"ecx");                            # "ecx" is preloaded
    &mov ("esi",&DWP(4*(9+15+16-14),"esp"));
    &ror ("ecx",18-7);
    &mov ("edi","esi");
    &ror ("esi",19-17);
    &xor ("ecx",$T);
    &shr ($T,3);
    &ror ("ecx",7);
    &xor ("esi","edi");
    &xor ($T,"ecx");                            # T = sigma0(X[-15])
    &ror ("esi",17);
    &add ($T,&DWP(4*(9+15+16),"esp"));          # T += X[-16]
    &shr ("edi",10);
    &add ($T,&DWP(4*(9+15+16-9),"esp"));        # T += X[-7]
    #&xor ("edi","esi")                         # sigma1(X[-2])
    #&add ($T,"edi");                           # T += sigma1(X[-2])
    #&mov (&DWP(4*(9+15),"esp"),$T);            # save X[0]

    &BODY_00_15(1);
}

sub BODY_00_15() {
    my $in_16_63=shift;

    &mov ("ecx",$E);
    &xor ("edi","esi")                  if ($in_16_63); # sigma1(X[-2])
    &mov ("esi",$Foff);
    &ror ("ecx",25-11);
    &add ($T,"edi")                     if ($in_16_63); # T += sigma1(X[-2])
    &mov ("edi",$Goff);
    &xor ("ecx",$E);
    &xor ("esi","edi");
    &mov ($T,&DWP(4*(9+15),"esp"))      if (!$in_16_63);
    &mov (&DWP(4*(9+15),"esp"),$T)      if ($in_16_63);  # save X[0]
    &ror ("ecx",11-6);
    &and ("esi",$E);
    &mov ($Eoff,$E);                    # modulo-scheduled
    &xor ($E,"ecx");
    &add ($T,$Hoff);                    # T += h
    &xor ("esi","edi");                 # Ch(e,f,g)
    &ror ($E,6);                        # Sigma1(e)
    &mov ("ecx",$A);
    &add ($T,"esi");                    # T += Ch(e,f,g)
    &ror ("ecx",22-13);
    &add ($T,$E);                       # T += Sigma1(e)
    &mov ("edi",$Boff);
    &xor ("ecx",$A);
    &mov ($Aoff,$A);                    # modulo-scheduled
    &lea ("esp",&DWP(-4,"esp"));
    &ror ("ecx",13-2);
    &mov ("esi",&DWP(0,$K256));
    &xor ("ecx",$A);
    &mov ($E,$Eoff);                    # e in next iteration, d in this one
    &xor ($A,"edi");                    # a ^= b
    &ror ("ecx",2);                     # Sigma0(a)
    &add ($T,"esi");                    # T += K[i]
    &mov (&DWP(0,"esp"),$A);            # (b^c) in next round
    &add ($E,$T);                       # d += T
    &and ($A,&DWP(4,"esp"));            # a &= (b^c)
    &add ($T,"ecx");                    # T += Sigma0(a)
    &xor ($A,"edi");                    # h = Maj(a,b,c) = Ch(a^b,c,b)
    &mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
    &add ($K256,4);
    &add ($A,$T);                       # h += T
}
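
# The "alternative Maj" above rests on the identity
#   Maj(a,b,c) = Ch(a^b, c, b) = ((a^b) & (b^c)) ^ b
# (if a==b the result is b==a, otherwise it is c, matching the majority
# function case by case). Each round stores its (a^b) on the stack so
# that the next round can reuse it as (b^c), saving work relative to
# the textbook 3-AND/2-XOR formulation.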

&external_label("OPENSSL_ia32cap_P")    if (!$i386);

&function_begin("sha256_block_data_order");
    &mov ("esi",wparam(0));             # ctx
    &mov ("edi",wparam(1));             # inp
    &mov ("eax",wparam(2));             # num
    &mov ("ebx","esp");                 # saved sp

    &call (&label("pic_point"));        # make it PIC!
&set_label("pic_point");
    &blindpop($K256);
    &lea ($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

    &sub ("esp",16);
    &and ("esp",-64);

    &shl ("eax",6);
    &add ("eax","edi");
    &mov (&DWP(0,"esp"),"esi");         # ctx
    &mov (&DWP(4,"esp"),"edi");         # inp
    &mov (&DWP(8,"esp"),"eax");         # inp+num*64, end of input
    &mov (&DWP(12,"esp"),"ebx");        # saved sp
if (!$i386 && $xmm) {
    &picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
    &mov ("ecx",&DWP(0,"edx"));
    &mov ("ebx",&DWP(4,"edx"));
    &test ("ecx",1<<20);                # check for P4
    &jnz (&label("loop"));
    &mov ("edx",&DWP(8,"edx"))          if ($xmm);
    &test ("ecx",1<<24);                # check for FXSR
    &jz ($unroll_after?&label("no_xmm"):&label("loop"));
    &and ("ecx",1<<30);                 # mask "Intel CPU" bit
    &and ("ebx",1<<28|1<<9);            # mask AVX and SSSE3 bits
    &test ("edx",1<<29)                 if ($shaext);  # check for SHA
    &jnz (&label("shaext"))             if ($shaext);
    &or ("ecx","ebx");
    &and ("ecx",1<<28|1<<30);
    &cmp ("ecx",1<<28|1<<30);
    if ($xmm) {
        &je (&label("AVX"))             if ($avx);
        &test ("ebx",1<<9);             # check for SSSE3
        &jnz (&label("SSSE3"));
    } else {
        &je (&label("loop_shrd"));
    }
    if ($unroll_after) {
&set_label("no_xmm");
        &sub ("eax","edi");
        &cmp ("eax",$unroll_after);
        &jae (&label("unrolled"));
    }
}
    &jmp (&label("loop"));
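
# A minimal standalone sketch (a hypothetical helper, not called by the
# generator) of the run-time dispatch emitted above, assuming the
# OPENSSL_ia32cap_P bit layout implied by the comments: word 0 bit 20 =
# P4, bit 24 = FXSR, bit 30 = "Intel CPU"; word 1 bit 9 = SSSE3,
# bit 28 = AVX; word 2 bit 29 = SHA extensions.
sub _dispatch_sketch {
    my ($w0,$w1,$w2) = @_;
    return "loop"   if ($w0 & 1<<20);           # P4: compact loop only
    return "no_xmm" if (!($w0 & 1<<24));        # no FXSR: integer paths
    return "shaext" if ($shaext && ($w2 & 1<<29));
    return "AVX"    if ($avx && ($w0 & 1<<30) && ($w1 & 1<<28));
    return "SSSE3"  if ($w1 & 1<<9);
    return "no_xmm";        # falls through to unrolled or compact loop
}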

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
    # copy input block to stack reversing byte and dword order
    for($i=0;$i<4;$i++) {
        &mov ("eax",&DWP($i*16+0,"edi"));
        &mov ("ebx",&DWP($i*16+4,"edi"));
        &mov ("ecx",&DWP($i*16+8,"edi"));
        &bswap ("eax");
        &mov ("edx",&DWP($i*16+12,"edi"));
        &bswap ("ebx");
        &push ("eax");
        &bswap ("ecx");
        &push ("ebx");
        &bswap ("edx");
        &push ("ecx");
        &push ("edx");
    }
    &add ("edi",64);
    &lea ("esp",&DWP(-4*9,"esp"));      # place for A,B,C,D,E,F,G,H
    &mov (&DWP(4*(9+16)+4,"esp"),"edi");

    # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
    &mov ($A,&DWP(0,"esi"));
    &mov ("ebx",&DWP(4,"esi"));
    &mov ("ecx",&DWP(8,"esi"));
    &mov ("edi",&DWP(12,"esi"));
    #&mov ($Aoff,$A);
    &mov ($Boff,"ebx");
    &xor ("ebx","ecx");
    &mov ($Coff,"ecx");
    &mov ($Doff,"edi");
    &mov (&DWP(0,"esp"),"ebx");         # magic
    &mov ($E,&DWP(16,"esi"));
    &mov ("ebx",&DWP(20,"esi"));
    &mov ("ecx",&DWP(24,"esi"));
    &mov ("edi",&DWP(28,"esi"));
    #&mov ($Eoff,$E);
    &mov ($Foff,"ebx");
    &mov ($Goff,"ecx");
    &mov ($Hoff,"edi");

&set_label("00_15$suffix",16);
    &BODY_00_15();
    &cmp ("esi",0xc19bf174);
    &jne (&label("00_15$suffix"));

    &mov ("ecx",&DWP(4*(9+15+16-1),"esp"));     # preloaded in BODY_00_15(1)
    &jmp (&label("16_63$suffix"));

&set_label("16_63$suffix",16);
    &BODY_16_63();
    &cmp ("esi",0xc67178f2);
    &jne (&label("16_63$suffix"));

    &mov ("esi",&DWP(4*(9+16+64)+0,"esp"));     # ctx
    #&mov ($A,$Aoff);
    &mov ("ebx",$Boff);
    #&mov ("edi",$Coff);
    &mov ("ecx",$Doff);
    &add ($A,&DWP(0,"esi"));
    &add ("ebx",&DWP(4,"esi"));
    &add ("edi",&DWP(8,"esi"));
    &add ("ecx",&DWP(12,"esi"));
    &mov (&DWP(0,"esi"),$A);
    &mov (&DWP(4,"esi"),"ebx");
    &mov (&DWP(8,"esi"),"edi");
    &mov (&DWP(12,"esi"),"ecx");
    #&mov ($E,$Eoff);
    &mov ("eax",$Foff);
    &mov ("ebx",$Goff);
    &mov ("ecx",$Hoff);
    &mov ("edi",&DWP(4*(9+16+64)+4,"esp"));     # inp
    &add ($E,&DWP(16,"esi"));
    &add ("eax",&DWP(20,"esi"));
    &add ("ebx",&DWP(24,"esi"));
    &add ("ecx",&DWP(28,"esi"));
    &mov (&DWP(16,"esi"),$E);
    &mov (&DWP(20,"esi"),"eax");
    &mov (&DWP(24,"esi"),"ebx");
    &mov (&DWP(28,"esi"),"ecx");
    &lea ("esp",&DWP(4*(9+16+64),"esp"));       # destroy frame
    &sub ($K256,4*64);                          # rewind K

    &cmp ("edi",&DWP(8,"esp"));                 # are we done yet?
    &jb (&label("loop$suffix"));
}

&COMPACT_LOOP();
    &mov ("esp",&DWP(12,"esp"));        # restore sp
&function_end_A();

if (!$i386 && !$xmm) {
    # ~20% improvement on Sandy Bridge
    local *ror = sub { &shrd(@_[0],@_) };
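    # shrd with identical source and destination registers is
    # architecturally equivalent to ror by the same count; overriding
    # &ror this way reuses COMPACT_LOOP unchanged while emitting the
    # form that measured faster on Sandy Bridge.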
    &COMPACT_LOOP("_shrd");
    &mov ("esp",&DWP(12,"esp"));        # restore sp
    &function_end_A();
}

&set_label("K256",64);  # Yes! I keep it in the code segment!
@K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
        0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
        0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
        0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
        0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
        0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
        0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
        0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
        0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
        0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
        0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
        0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
        0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
        0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
        0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
        0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
&data_word(@K256);
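# The table above holds the first 32 bits of the fractional parts of
# the cube roots of the first 64 primes, the K constants of FIPS 180-4.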
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);        # byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);       # offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
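# off() maps the logical variables a..h onto their rotating stack
# slots: the unrolled code renames variables each round instead of
# moving data, so at round $i the slot for, say, $a sits at
# 4*((0-$i)&7) bytes into the frame.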

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
    &lea ("esp",&DWP(-96,"esp"));
    # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
    &mov ($AH[0],&DWP(0,"esi"));
    &mov ($AH[1],&DWP(4,"esi"));
    &mov ("ecx",&DWP(8,"esi"));
    &mov ("ebx",&DWP(12,"esi"));
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"ecx");                # magic
    &mov (&DWP(8,"esp"),"ecx");
    &mov (&DWP(12,"esp"),"ebx");
    &mov ($E,&DWP(16,"esi"));
    &mov ("ebx",&DWP(20,"esi"));
    &mov ("ecx",&DWP(24,"esi"));
    &mov ("esi",&DWP(28,"esi"));
    #&mov (&DWP(16,"esp"),$E);
    &mov (&DWP(20,"esp"),"ebx");
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esp"),"esi");
    &jmp (&label("grand_loop"));

&set_label("grand_loop",16);
    # copy input block to stack reversing byte order
    for($i=0;$i<5;$i++) {
        &mov ("ebx",&DWP(12*$i+0,"edi"));
        &mov ("ecx",&DWP(12*$i+4,"edi"));
        &bswap ("ebx");
        &mov ("esi",&DWP(12*$i+8,"edi"));
        &bswap ("ecx");
        &mov (&DWP(32+12*$i+0,"esp"),"ebx");
        &bswap ("esi");
        &mov (&DWP(32+12*$i+4,"esp"),"ecx");
        &mov (&DWP(32+12*$i+8,"esp"),"esi");
    }
    &mov ("ebx",&DWP($i*12,"edi"));
    &add ("edi",64);
    &bswap ("ebx");
    &mov (&DWP(96+4,"esp"),"edi");
    &mov (&DWP(32+12*$i,"esp"),"ebx");

my ($t1,$t2) = ("ecx","esi");

for ($i=0;$i<64;$i++) {
    if ($i>=16) {
        &mov ($T,$t1);                          # $t1 is preloaded
        #&mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
        &ror ($t1,18-7);
        &mov ("edi",$t2);
        &ror ($t2,19-17);
        &xor ($t1,$T);
        &shr ($T,3);
        &ror ($t1,7);
        &xor ($t2,"edi");
        &xor ($T,$t1);                          # T = sigma0(X[-15])
        &ror ($t2,17);
        &add ($T,&DWP(32+4*($i&15),"esp"));     # T += X[-16]
        &shr ("edi",10);
        &add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
        #&xor ("edi",$t2)                       # sigma1(X[-2])
        #&add ($T,"edi");                       # T += sigma1(X[-2])
        #&mov (&DWP(4*(9+15),"esp"),$T);        # save X[0]
    }
    &mov ($t1,$E);
    &xor ("edi",$t2)                    if ($i>=16);    # sigma1(X[-2])
    &mov ($t2,&off($f));
    &ror ($E,25-11);
    &add ($T,"edi")                     if ($i>=16);    # T += sigma1(X[-2])
    &mov ("edi",&off($g));
    &xor ($E,$t1);
    &mov ($T,&DWP(32+4*($i&15),"esp"))  if ($i<16);     # X[i]
    &mov (&DWP(32+4*($i&15),"esp"),$T)  if ($i>=16 && $i<62);  # save X[0]
    &xor ($t2,"edi");
    &ror ($E,11-6);
    &and ($t2,$t1);
    &mov (&off($e),$t1);                # save $E, modulo-scheduled
    &xor ($E,$t1);
    &add ($T,&off($h));                 # T += h
    &xor ("edi",$t2);                   # Ch(e,f,g)
    &ror ($E,6);                        # Sigma1(e)
    &mov ($t1,$AH[0]);
    &add ($T,"edi");                    # T += Ch(e,f,g)
    &ror ($t1,22-13);
    &mov ($t2,$AH[0]);
    &mov ("edi",&off($b));
    &xor ($t1,$AH[0]);
    &mov (&off($a),$AH[0]);             # save $A, modulo-scheduled
    &xor ($AH[0],"edi");                # a ^= b, (b^c) in next round
    &ror ($t1,13-2);
    &and ($AH[1],$AH[0]);               # (b^c) &= (a^b)
    &lea ($E,&DWP(@K256[$i],$T,$E));    # T += Sigma1(e)+K[i]
    &xor ($t1,$t2);
    &xor ($AH[1],"edi");                # h = Maj(a,b,c) = Ch(a^b,c,b)
    &mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
    &ror ($t1,2);                       # Sigma0(a)

    &add ($AH[1],$E);                   # h += T
    &add ($E,&off($d));                 # d += T
    &add ($AH[1],$t1);                  # h += Sigma0(a)
    &mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);

    @AH = reverse(@AH);                 # rotate(a,h)
    ($t1,$t2) = ($t2,$t1);              # rotate(t1,t2)
}
    &mov ("esi",&DWP(96,"esp"));        # ctx
    #&mov ($AH[0],&DWP(0,"esp"));
    &xor ($AH[1],"edi");                #&mov ($AH[1],&DWP(4,"esp"));
    #&mov ("edi", &DWP(8,"esp"));
    &mov ("ecx",&DWP(12,"esp"));
    &add ($AH[0],&DWP(0,"esi"));
    &add ($AH[1],&DWP(4,"esi"));
    &add ("edi",&DWP(8,"esi"));
    &add ("ecx",&DWP(12,"esi"));
    &mov (&DWP(0,"esi"),$AH[0]);
    &mov (&DWP(4,"esi"),$AH[1]);
    &mov (&DWP(8,"esi"),"edi");
    &mov (&DWP(12,"esi"),"ecx");
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"edi");                # magic
    &mov (&DWP(8,"esp"),"edi");
    &mov (&DWP(12,"esp"),"ecx");
    #&mov ($E,&DWP(16,"esp"));
    &mov ("edi",&DWP(20,"esp"));
    &mov ("ebx",&DWP(24,"esp"));
    &mov ("ecx",&DWP(28,"esp"));
    &add ($E,&DWP(16,"esi"));
    &add ("edi",&DWP(20,"esi"));
    &add ("ebx",&DWP(24,"esi"));
    &add ("ecx",&DWP(28,"esi"));
    &mov (&DWP(16,"esi"),$E);
    &mov (&DWP(20,"esi"),"edi");
    &mov (&DWP(24,"esi"),"ebx");
    &mov (&DWP(28,"esi"),"ecx");
    #&mov (&DWP(16,"esp"),$E);
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp
    &mov (&DWP(24,"esp"),"ebx");
    &mov (&DWP(28,"esp"),"ecx");

    &cmp ("edi",&DWP(96+8,"esp"));      # are we done yet?
    &jb (&label("grand_loop"));

    &mov ("esp",&DWP(96+12,"esp"));     # restore sp
&function_end_A();
}

if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
    my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {   &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);   }
}
sub sha256rnds2 { sha256op38(0xcb,@_); }
sub sha256msg1  { sha256op38(0xcc,@_); }
sub sha256msg2  { sha256op38(0xcd,@_); }
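
# The SHA opcodes are emitted as raw bytes because older assemblers do
# not know them. Worked encoding example: SHA256RNDS2 is 0f 38 cb /r
# with xmm0 as the implicit round-key operand (which is why $Wi is
# pinned to xmm0 above), so sha256rnds2(xmm1,xmm2) emits
# 0f 38 cb 0xc0|(1<<3)|2, i.e. 0f 38 cb ca.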

&set_label("shaext",32);
    &sub ("esp",32);

    &movdqu ($ABEF,&QWP(0,$ctx));               # DCBA
    &lea ($K256,&DWP(0x80,$K256));
    &movdqu ($CDGH,&QWP(16,$ctx));              # HGFE
    &movdqa ($TMP,&QWP(0x100-0x80,$K256));      # byte swap mask

    &pshufd ($Wi,$ABEF,0x1b);                   # ABCD
    &pshufd ($ABEF,$ABEF,0xb1);                 # CDAB
    &pshufd ($CDGH,$CDGH,0x1b);                 # EFGH
    &palignr ($ABEF,$CDGH,8);                   # ABEF
    &punpcklqdq ($CDGH,$Wi);                    # CDGH
    &jmp (&label("loop_shaext"));

&set_label("loop_shaext",16);
    &movdqu (@MSG[0],&QWP(0,$inp));
    &movdqu (@MSG[1],&QWP(0x10,$inp));
    &movdqu (@MSG[2],&QWP(0x20,$inp));
    &pshufb (@MSG[0],$TMP);
    &movdqu (@MSG[3],&QWP(0x30,$inp));
    &movdqa (&QWP(16,"esp"),$CDGH);             # offload

    &movdqa ($Wi,&QWP(0*16-0x80,$K256));
    &paddd ($Wi,@MSG[0]);
    &pshufb (@MSG[1],$TMP);
    &sha256rnds2 ($CDGH,$ABEF);                 # 0-3
    &pshufd ($Wi,$Wi,0x0e);
    &nop ();
    &movdqa (&QWP(0,"esp"),$ABEF);              # offload
    &sha256rnds2 ($ABEF,$CDGH);

    &movdqa ($Wi,&QWP(1*16-0x80,$K256));
    &paddd ($Wi,@MSG[1]);
    &pshufb (@MSG[2],$TMP);
    &sha256rnds2 ($CDGH,$ABEF);                 # 4-7
    &pshufd ($Wi,$Wi,0x0e);
    &lea ($inp,&DWP(0x40,$inp));
    &sha256msg1 (@MSG[0],@MSG[1]);
    &sha256rnds2 ($ABEF,$CDGH);

    &movdqa ($Wi,&QWP(2*16-0x80,$K256));
    &paddd ($Wi,@MSG[2]);
    &pshufb (@MSG[3],$TMP);
    &sha256rnds2 ($CDGH,$ABEF);                 # 8-11
    &pshufd ($Wi,$Wi,0x0e);
    &movdqa ($TMP,@MSG[3]);
    &palignr ($TMP,@MSG[2],4);
    &nop ();
    &paddd (@MSG[0],$TMP);
    &sha256msg1 (@MSG[1],@MSG[2]);
    &sha256rnds2 ($ABEF,$CDGH);

    &movdqa ($Wi,&QWP(3*16-0x80,$K256));
    &paddd ($Wi,@MSG[3]);
    &sha256msg2 (@MSG[0],@MSG[3]);
    &sha256rnds2 ($CDGH,$ABEF);                 # 12-15
    &pshufd ($Wi,$Wi,0x0e);
    &movdqa ($TMP,@MSG[0]);
    &palignr ($TMP,@MSG[3],4);
    &nop ();
    &paddd (@MSG[1],$TMP);
    &sha256msg1 (@MSG[2],@MSG[3]);
    &sha256rnds2 ($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
    &movdqa ($Wi,&QWP($i*16-0x80,$K256));
    &paddd ($Wi,@MSG[0]);
    &sha256msg2 (@MSG[1],@MSG[0]);
    &sha256rnds2 ($CDGH,$ABEF);                 # 16-19...
    &pshufd ($Wi,$Wi,0x0e);
    &movdqa ($TMP,@MSG[1]);
    &palignr ($TMP,@MSG[0],4);
    &nop ();
    &paddd (@MSG[2],$TMP);
    &sha256msg1 (@MSG[3],@MSG[0]);
    &sha256rnds2 ($ABEF,$CDGH);

    push(@MSG,shift(@MSG));
}
    &movdqa ($Wi,&QWP(13*16-0x80,$K256));
    &paddd ($Wi,@MSG[0]);
    &sha256msg2 (@MSG[1],@MSG[0]);
    &sha256rnds2 ($CDGH,$ABEF);                 # 52-55
    &pshufd ($Wi,$Wi,0x0e);
    &movdqa ($TMP,@MSG[1]);
    &palignr ($TMP,@MSG[0],4);
    &sha256rnds2 ($ABEF,$CDGH);
    &paddd (@MSG[2],$TMP);

    &movdqa ($Wi,&QWP(14*16-0x80,$K256));
    &paddd ($Wi,@MSG[1]);
    &sha256rnds2 ($CDGH,$ABEF);                 # 56-59
    &pshufd ($Wi,$Wi,0x0e);
    &sha256msg2 (@MSG[2],@MSG[1]);
    &movdqa ($TMP,&QWP(0x100-0x80,$K256));      # byte swap mask
    &sha256rnds2 ($ABEF,$CDGH);

    &movdqa ($Wi,&QWP(15*16-0x80,$K256));
    &paddd ($Wi,@MSG[2]);
    &nop ();
    &sha256rnds2 ($CDGH,$ABEF);                 # 60-63
    &pshufd ($Wi,$Wi,0x0e);
    &cmp ($end,$inp);
    &nop ();
    &sha256rnds2 ($ABEF,$CDGH);

    &paddd ($CDGH,&QWP(16,"esp"));
    &paddd ($ABEF,&QWP(0,"esp"));
    &jnz (&label("loop_shaext"));

    &pshufd ($CDGH,$CDGH,0xb1);                 # DCHG
    &pshufd ($TMP,$ABEF,0x1b);                  # FEBA
    &pshufd ($ABEF,$ABEF,0xb1);                 # BAFE
    &punpckhqdq ($ABEF,$CDGH);                  # DCBA
    &palignr ($CDGH,$TMP,8);                    # HGFE

    &mov ("esp",&DWP(32+12,"esp"));
    &movdqu (&QWP(0,$ctx),$ABEF);
    &movdqu (&QWP(16,$ctx),$CDGH);
&function_end_A();
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
    &lea ("esp",&DWP(-96,"esp"));
    # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
    &mov ($AH[0],&DWP(0,"esi"));
    &mov ($AH[1],&DWP(4,"esi"));
    &mov ("ecx",&DWP(8,"esi"));
    &mov ("edi",&DWP(12,"esi"));
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"ecx");                # magic
    &mov (&DWP(8,"esp"),"ecx");
    &mov (&DWP(12,"esp"),"edi");
    &mov ($E,&DWP(16,"esi"));
    &mov ("edi",&DWP(20,"esi"));
    &mov ("ecx",&DWP(24,"esi"));
    &mov ("esi",&DWP(28,"esi"));
    #&mov (&DWP(16,"esp"),$E);
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esp"),"esi");
    &movdqa ($t3,&QWP(256,$K256));
    &jmp (&label("grand_ssse3"));

&set_label("grand_ssse3",16);
    # load input, reverse byte order, add K256[0..15], save to stack
    &movdqu (@X[0],&QWP(0,"edi"));
    &movdqu (@X[1],&QWP(16,"edi"));
    &movdqu (@X[2],&QWP(32,"edi"));
    &movdqu (@X[3],&QWP(48,"edi"));
    &add ("edi",64);
    &pshufb (@X[0],$t3);
    &mov (&DWP(96+4,"esp"),"edi");
    &pshufb (@X[1],$t3);
    &movdqa ($t0,&QWP(0,$K256));
    &pshufb (@X[2],$t3);
    &movdqa ($t1,&QWP(16,$K256));
    &paddd ($t0,@X[0]);
    &pshufb (@X[3],$t3);
    &movdqa ($t2,&QWP(32,$K256));
    &paddd ($t1,@X[1]);
    &movdqa ($t3,&QWP(48,$K256));
    &movdqa (&QWP(32+0,"esp"),$t0);
    &paddd ($t2,@X[2]);
    &movdqa (&QWP(32+16,"esp"),$t1);
    &paddd ($t3,@X[3]);
    &movdqa (&QWP(32+32,"esp"),$t2);
    &movdqa (&QWP(32+48,"esp"),$t3);
    &jmp (&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
    &add ($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);      # 120 instructions

    eval(shift(@insns));
    &movdqa ($t0,@X[1]);
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &movdqa ($t3,@X[3]);
    eval(shift(@insns));
    eval(shift(@insns));
    &palignr ($t0,@X[0],4);             # X[1..4]
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &palignr ($t3,@X[2],4);             # X[9..12]
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    &movdqa ($t1,$t0);
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &movdqa ($t2,$t0);
    eval(shift(@insns));
    eval(shift(@insns));
    &psrld ($t0,3);
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &paddd (@X[0],$t3);                 # X[0..3] += X[9..12]
    eval(shift(@insns));
    eval(shift(@insns));
    &psrld ($t2,7);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &pshufd ($t3,@X[3],0b11111010);     # X[14..15]
    eval(shift(@insns));
    eval(shift(@insns));
    &pslld ($t1,32-18);
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t0,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    &psrld ($t2,18-7);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t0,$t1);
    eval(shift(@insns));
    eval(shift(@insns));
    &pslld ($t1,18-7);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t0,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    &movdqa ($t2,$t3);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t0,$t1);                    # sigma0(X[1..4])
    eval(shift(@insns));
    eval(shift(@insns));
    &psrld ($t3,10);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &paddd (@X[0],$t0);                 # X[0..3] += sigma0(X[1..4])
    eval(shift(@insns));
    eval(shift(@insns));
    &psrlq ($t2,17);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t3,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    &psrlq ($t2,19-17);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t3,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    &pshufd ($t3,$t3,0b10000000);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &psrldq ($t3,8);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    &paddd (@X[0],$t3);                 # X[0..1] += sigma1(X[14..15])
    eval(shift(@insns));                # @
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    &pshufd ($t3,@X[0],0b01010000);     # X[16..17]
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    &movdqa ($t2,$t3);
    eval(shift(@insns));                # @
    &psrld ($t3,10);
    eval(shift(@insns));
    &psrlq ($t2,17);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t3,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    &psrlq ($t2,19-17);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &pxor ($t3,$t2);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    &pshufd ($t3,$t3,0b00001000);
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &movdqa ($t2,&QWP(16*$j,$K256));
    eval(shift(@insns));
    eval(shift(@insns));
    &pslldq ($t3,8);
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));                # @
    &paddd (@X[0],$t3);                 # X[2..3] += sigma1(X[16..17])
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    eval(shift(@insns));
    &paddd ($t2,@X[0]);
    eval(shift(@insns));                # @

    foreach (@insns) { eval; }          # remaining instructions

    &movdqa (&QWP(32+16*$j,"esp"),$t2);
}

sub body_00_15 () {
    (
    '&mov ("ecx",$E);',
    '&ror ($E,25-11);',
    '&mov ("esi",&off($f));',
    '&xor ($E,"ecx");',
    '&mov ("edi",&off($g));',
    '&xor ("esi","edi");',
    '&ror ($E,11-6);',
    '&and ("esi","ecx");',
    '&mov (&off($e),"ecx");',           # save $E, modulo-scheduled
    '&xor ($E,"ecx");',
    '&xor ("edi","esi");',              # Ch(e,f,g)
    '&ror ($E,6);',                     # T = Sigma1(e)
    '&mov ("ecx",$AH[0]);',
    '&add ($E,"edi");',                 # T += Ch(e,f,g)
    '&mov ("edi",&off($b));',
    '&mov ("esi",$AH[0]);',
    '&ror ("ecx",22-13);',
    '&mov (&off($a),$AH[0]);',          # save $A, modulo-scheduled
    '&xor ("ecx",$AH[0]);',
    '&xor ($AH[0],"edi");',             # a ^= b, (b^c) in next round
    '&add ($E,&off($h));',              # T += h
    '&ror ("ecx",13-2);',
    '&and ($AH[1],$AH[0]);',            # (b^c) &= (a^b)
    '&xor ("ecx","esi");',
    '&add ($E,&DWP(32+4*($i&15),"esp"));',      # T += K[i]+X[i]
    '&xor ($AH[1],"edi");',             # h = Maj(a,b,c) = Ch(a^b,c,b)
    '&ror ("ecx",2);',                  # Sigma0(a)
    '&add ($AH[1],$E);',                # h += T
    '&add ($E,&off($d));',              # d += T
    '&add ($AH[1],"ecx");'.             # h += Sigma0(a)
    '@AH = reverse(@AH); $i++;'         # rotate(a,h)
    );
}

for ($i=0,$j=0; $j<4; $j++) {
    &SSSE3_00_47($j,\&body_00_15,@X);
    push(@X,shift(@X));                 # rotate(@X)
}
    &cmp (&DWP(16*$j,$K256),0x00010203);
    &jne (&label("ssse3_00_47"));

for ($i=0; $i<16; ) {
    foreach(body_00_15()) { eval; }
}
    &mov ("esi",&DWP(96,"esp"));        # ctx
    #&mov ($AH[0],&DWP(0,"esp"));
    &xor ($AH[1],"edi");                #&mov ($AH[1],&DWP(4,"esp"));
    #&mov ("edi", &DWP(8,"esp"));
    &mov ("ecx",&DWP(12,"esp"));
    &add ($AH[0],&DWP(0,"esi"));
    &add ($AH[1],&DWP(4,"esi"));
    &add ("edi",&DWP(8,"esi"));
    &add ("ecx",&DWP(12,"esi"));
    &mov (&DWP(0,"esi"),$AH[0]);
    &mov (&DWP(4,"esi"),$AH[1]);
    &mov (&DWP(8,"esi"),"edi");
    &mov (&DWP(12,"esi"),"ecx");
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"edi");                # magic
    &mov (&DWP(8,"esp"),"edi");
    &mov (&DWP(12,"esp"),"ecx");
    #&mov ($E,&DWP(16,"esp"));
    &mov ("edi",&DWP(20,"esp"));
    &mov ("ecx",&DWP(24,"esp"));
    &add ($E,&DWP(16,"esi"));
    &add ("edi",&DWP(20,"esi"));
    &add ("ecx",&DWP(24,"esi"));
    &mov (&DWP(16,"esi"),$E);
    &mov (&DWP(20,"esi"),"edi");
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(28,"esp"));
    &mov (&DWP(24,"esi"),"ecx");
    #&mov (&DWP(16,"esp"),$E);
    &add ("edi",&DWP(28,"esi"));
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esi"),"edi");
    &mov (&DWP(28,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp

    &movdqa ($t3,&QWP(64,$K256));
    &sub ($K256,3*64);                  # rewind K

    &cmp ("edi",&DWP(96+8,"esp"));      # are we done yet?
    &jb (&label("grand_ssse3"));

    &mov ("esp",&DWP(96+12,"esp"));     # restore sp
&function_end_A();

if ($avx) {
&set_label("AVX",32);
    if ($avx>1) {
        &and ("edx",1<<8|1<<3);         # check for BMI2+BMI1
        &cmp ("edx",1<<8|1<<3);
        &je (&label("AVX_BMI"));
    }
    &lea ("esp",&DWP(-96,"esp"));
    &vzeroall ();
    # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
    &mov ($AH[0],&DWP(0,"esi"));
    &mov ($AH[1],&DWP(4,"esi"));
    &mov ("ecx",&DWP(8,"esi"));
    &mov ("edi",&DWP(12,"esi"));
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"ecx");                # magic
    &mov (&DWP(8,"esp"),"ecx");
    &mov (&DWP(12,"esp"),"edi");
    &mov ($E,&DWP(16,"esi"));
    &mov ("edi",&DWP(20,"esi"));
    &mov ("ecx",&DWP(24,"esi"));
    &mov ("esi",&DWP(28,"esi"));
    #&mov (&DWP(16,"esp"),$E);
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esp"),"esi");
    &vmovdqa ($t3,&QWP(256,$K256));
    &jmp (&label("grand_avx"));

&set_label("grand_avx",32);
    # load input, reverse byte order, add K256[0..15], save to stack
    &vmovdqu (@X[0],&QWP(0,"edi"));
    &vmovdqu (@X[1],&QWP(16,"edi"));
    &vmovdqu (@X[2],&QWP(32,"edi"));
    &vmovdqu (@X[3],&QWP(48,"edi"));
    &add ("edi",64);
    &vpshufb (@X[0],@X[0],$t3);
    &mov (&DWP(96+4,"esp"),"edi");
    &vpshufb (@X[1],@X[1],$t3);
    &vpshufb (@X[2],@X[2],$t3);
    &vpaddd ($t0,@X[0],&QWP(0,$K256));
    &vpshufb (@X[3],@X[3],$t3);
    &vpaddd ($t1,@X[1],&QWP(16,$K256));
    &vpaddd ($t2,@X[2],&QWP(32,$K256));
    &vpaddd ($t3,@X[3],&QWP(48,$K256));
    &vmovdqa (&QWP(32+0,"esp"),$t0);
    &vmovdqa (&QWP(32+16,"esp"),$t1);
    &vmovdqa (&QWP(32+32,"esp"),$t2);
    &vmovdqa (&QWP(32+48,"esp"),$t3);
    &jmp (&label("avx_00_47"));

&set_label("avx_00_47",16);
    &add ($K256,64);

sub Xupdate_AVX () {
    (
    '&vpalignr ($t0,@X[1],@X[0],4);',   # X[1..4]
    '&vpalignr ($t3,@X[3],@X[2],4);',   # X[9..12]
    '&vpsrld ($t2,$t0,7);',
    '&vpaddd (@X[0],@X[0],$t3);',       # X[0..3] += X[9..12]
    '&vpsrld ($t3,$t0,3);',
    '&vpslld ($t1,$t0,14);',
    '&vpxor ($t0,$t3,$t2);',
    '&vpshufd ($t3,@X[3],0b11111010)',  # X[14..15]
    '&vpsrld ($t2,$t2,18-7);',
    '&vpxor ($t0,$t0,$t1);',
    '&vpslld ($t1,$t1,25-14);',
    '&vpxor ($t0,$t0,$t2);',
    '&vpsrld ($t2,$t3,10);',
    '&vpxor ($t0,$t0,$t1);',            # sigma0(X[1..4])
    '&vpsrlq ($t1,$t3,17);',
    '&vpaddd (@X[0],@X[0],$t0);',       # X[0..3] += sigma0(X[1..4])
    '&vpxor ($t2,$t2,$t1);',
    '&vpsrlq ($t3,$t3,19);',
    '&vpxor ($t2,$t2,$t3);',            # sigma1(X[14..15])
    '&vpshufd ($t3,$t2,0b10000100);',
    '&vpsrldq ($t3,$t3,8);',
    '&vpaddd (@X[0],@X[0],$t3);',       # X[0..1] += sigma1(X[14..15])
    '&vpshufd ($t3,@X[0],0b01010000)',  # X[16..17]
    '&vpsrld ($t2,$t3,10);',
    '&vpsrlq ($t1,$t3,17);',
    '&vpxor ($t2,$t2,$t1);',
    '&vpsrlq ($t3,$t3,19);',
    '&vpxor ($t2,$t2,$t3);',            # sigma1(X[16..17])
    '&vpshufd ($t3,$t2,0b11101000);',
    '&vpslldq ($t3,$t3,8);',
    '&vpaddd (@X[0],@X[0],$t3);'        # X[2..3] += sigma1(X[16..17])
    );
}
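
# SSE/AVX have no packed 32-bit rotate, so the vector paths synthesize
# the rotates from shift pairs: ror(x,7) = (x>>7)|(x<<25) and
# ror(x,18) = (x>>18)|(x<<14), giving sigma0(x) =
# ror(x,7)^ror(x,18)^(x>>3) out of the vpsrld/vpslld/vpxor sequence
# above; sigma1 is built the same way from the 17/19-bit rotates,
# using vpsrlq on 64-bit lanes that pshufd has loaded with X[14..15]
# and X[16..17].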

local *ror = sub { &shrd(@_[0],@_) };

sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);      # 120 instructions
my $insn;

    foreach (Xupdate_AVX()) {           # 31 instructions
        eval;
        eval(shift(@insns));
        eval(shift(@insns));
        eval($insn = shift(@insns));
        eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
    }
    &vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
    foreach (@insns) { eval; }          # remaining instructions
    &vmovdqa (&QWP(32+16*$j,"esp"),$t2);
}

for ($i=0,$j=0; $j<4; $j++) {
    &AVX_00_47($j,\&body_00_15,@X);
    push(@X,shift(@X));                 # rotate(@X)
}
    &cmp (&DWP(16*$j,$K256),0x00010203);
    &jne (&label("avx_00_47"));

for ($i=0; $i<16; ) {
    foreach(body_00_15()) { eval; }
}

    &mov ("esi",&DWP(96,"esp"));        # ctx
    #&mov ($AH[0],&DWP(0,"esp"));
    &xor ($AH[1],"edi");                #&mov ($AH[1],&DWP(4,"esp"));
    #&mov ("edi", &DWP(8,"esp"));
    &mov ("ecx",&DWP(12,"esp"));
    &add ($AH[0],&DWP(0,"esi"));
    &add ($AH[1],&DWP(4,"esi"));
    &add ("edi",&DWP(8,"esi"));
    &add ("ecx",&DWP(12,"esi"));
    &mov (&DWP(0,"esi"),$AH[0]);
    &mov (&DWP(4,"esi"),$AH[1]);
    &mov (&DWP(8,"esi"),"edi");
    &mov (&DWP(12,"esi"),"ecx");
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"edi");                # magic
    &mov (&DWP(8,"esp"),"edi");
    &mov (&DWP(12,"esp"),"ecx");
    #&mov ($E,&DWP(16,"esp"));
    &mov ("edi",&DWP(20,"esp"));
    &mov ("ecx",&DWP(24,"esp"));
    &add ($E,&DWP(16,"esi"));
    &add ("edi",&DWP(20,"esi"));
    &add ("ecx",&DWP(24,"esi"));
    &mov (&DWP(16,"esi"),$E);
    &mov (&DWP(20,"esi"),"edi");
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(28,"esp"));
    &mov (&DWP(24,"esi"),"ecx");
    #&mov (&DWP(16,"esp"),$E);
    &add ("edi",&DWP(28,"esi"));
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esi"),"edi");
    &mov (&DWP(28,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp

    &vmovdqa ($t3,&QWP(64,$K256));
    &sub ($K256,3*64);                  # rewind K

    &cmp ("edi",&DWP(96+8,"esp"));      # are we done yet?
    &jb (&label("grand_avx"));

    &mov ("esp",&DWP(96+12,"esp"));     # restore sp
    &vzeroall ();
&function_end_A();

if ($avx>1) {
sub bodyx_00_15 () {                    # +10%
    (
    '&rorx ("ecx",$E,6)',
    '&rorx ("esi",$E,11)',
    '&mov (&off($e),$E)',               # save $E, modulo-scheduled
    '&rorx ("edi",$E,25)',
    '&xor ("ecx","esi")',
    '&andn ("esi",$E,&off($g))',
    '&xor ("ecx","edi")',               # Sigma1(e)
    '&and ($E,&off($f))',
    '&mov (&off($a),$AH[0]);',          # save $A, modulo-scheduled
    '&or ($E,"esi")',                   # T = Ch(e,f,g)
    '&rorx ("edi",$AH[0],2)',
    '&rorx ("esi",$AH[0],13)',
    '&lea ($E,&DWP(0,$E,"ecx"))',       # T += Sigma1(e)
    '&rorx ("ecx",$AH[0],22)',
    '&xor ("esi","edi")',
    '&mov ("edi",&off($b))',
    '&xor ("ecx","esi")',               # Sigma0(a)
    '&xor ($AH[0],"edi")',              # a ^= b, (b^c) in next round
    '&add ($E,&off($h))',               # T += h
    '&and ($AH[1],$AH[0])',             # (b^c) &= (a^b)
    '&add ($E,&DWP(32+4*($i&15),"esp"))',       # T += K[i]+X[i]
    '&xor ($AH[1],"edi")',              # h = Maj(a,b,c) = Ch(a^b,c,b)
    '&add ("ecx",$E)',                  # h += T
    '&add ($E,&off($d))',               # d += T
    '&lea ($AH[1],&DWP(0,$AH[1],"ecx"));'.      # h += Sigma0(a)
    '@AH = reverse(@AH); $i++;'         # rotate(a,h)
    );
}
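
# bodyx_00_15 leans on BMI1's andn, which computes (~e)&g in one
# instruction. Because (e&f) and (~e)&g can never both have the same
# bit set, the '&or ($E,"esi")' above is equivalent to the XOR in the
# textbook Ch(e,f,g) = (e&f)^((~e)&g).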

&set_label("AVX_BMI",32);
    &lea ("esp",&DWP(-96,"esp"));
    &vzeroall ();
    # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
    &mov ($AH[0],&DWP(0,"esi"));
    &mov ($AH[1],&DWP(4,"esi"));
    &mov ("ecx",&DWP(8,"esi"));
    &mov ("edi",&DWP(12,"esi"));
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"ecx");                # magic
    &mov (&DWP(8,"esp"),"ecx");
    &mov (&DWP(12,"esp"),"edi");
    &mov ($E,&DWP(16,"esi"));
    &mov ("edi",&DWP(20,"esi"));
    &mov ("ecx",&DWP(24,"esi"));
    &mov ("esi",&DWP(28,"esi"));
    #&mov (&DWP(16,"esp"),$E);
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esp"),"esi");
    &vmovdqa ($t3,&QWP(256,$K256));
    &jmp (&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
    # load input, reverse byte order, add K256[0..15], save to stack
    &vmovdqu (@X[0],&QWP(0,"edi"));
    &vmovdqu (@X[1],&QWP(16,"edi"));
    &vmovdqu (@X[2],&QWP(32,"edi"));
    &vmovdqu (@X[3],&QWP(48,"edi"));
    &add ("edi",64);
    &vpshufb (@X[0],@X[0],$t3);
    &mov (&DWP(96+4,"esp"),"edi");
    &vpshufb (@X[1],@X[1],$t3);
    &vpshufb (@X[2],@X[2],$t3);
    &vpaddd ($t0,@X[0],&QWP(0,$K256));
    &vpshufb (@X[3],@X[3],$t3);
    &vpaddd ($t1,@X[1],&QWP(16,$K256));
    &vpaddd ($t2,@X[2],&QWP(32,$K256));
    &vpaddd ($t3,@X[3],&QWP(48,$K256));
    &vmovdqa (&QWP(32+0,"esp"),$t0);
    &vmovdqa (&QWP(32+16,"esp"),$t1);
    &vmovdqa (&QWP(32+32,"esp"),$t2);
    &vmovdqa (&QWP(32+48,"esp"),$t3);
    &jmp (&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
    &add ($K256,64);

for ($i=0,$j=0; $j<4; $j++) {
    &AVX_00_47($j,\&bodyx_00_15,@X);
    push(@X,shift(@X));                 # rotate(@X)
}
    &cmp (&DWP(16*$j,$K256),0x00010203);
    &jne (&label("avx_bmi_00_47"));

for ($i=0; $i<16; ) {
    foreach(bodyx_00_15()) { eval; }
}

    &mov ("esi",&DWP(96,"esp"));        # ctx
    #&mov ($AH[0],&DWP(0,"esp"));
    &xor ($AH[1],"edi");                #&mov ($AH[1],&DWP(4,"esp"));
    #&mov ("edi", &DWP(8,"esp"));
    &mov ("ecx",&DWP(12,"esp"));
    &add ($AH[0],&DWP(0,"esi"));
    &add ($AH[1],&DWP(4,"esi"));
    &add ("edi",&DWP(8,"esi"));
    &add ("ecx",&DWP(12,"esi"));
    &mov (&DWP(0,"esi"),$AH[0]);
    &mov (&DWP(4,"esi"),$AH[1]);
    &mov (&DWP(8,"esi"),"edi");
    &mov (&DWP(12,"esi"),"ecx");
    #&mov (&DWP(0,"esp"),$AH[0]);
    &mov (&DWP(4,"esp"),$AH[1]);
    &xor ($AH[1],"edi");                # magic
    &mov (&DWP(8,"esp"),"edi");
    &mov (&DWP(12,"esp"),"ecx");
    #&mov ($E,&DWP(16,"esp"));
    &mov ("edi",&DWP(20,"esp"));
    &mov ("ecx",&DWP(24,"esp"));
    &add ($E,&DWP(16,"esi"));
    &add ("edi",&DWP(20,"esi"));
    &add ("ecx",&DWP(24,"esi"));
    &mov (&DWP(16,"esi"),$E);
    &mov (&DWP(20,"esi"),"edi");
    &mov (&DWP(20,"esp"),"edi");
    &mov ("edi",&DWP(28,"esp"));
    &mov (&DWP(24,"esi"),"ecx");
    #&mov (&DWP(16,"esp"),$E);
    &add ("edi",&DWP(28,"esi"));
    &mov (&DWP(24,"esp"),"ecx");
    &mov (&DWP(28,"esi"),"edi");
    &mov (&DWP(28,"esp"),"edi");
    &mov ("edi",&DWP(96+4,"esp"));      # inp

    &vmovdqa ($t3,&QWP(64,$K256));
    &sub ($K256,3*64);                  # rewind K

    &cmp ("edi",&DWP(96+8,"esp"));      # are we done yet?
    &jb (&label("grand_avx_bmi"));

    &mov ("esp",&DWP(96+12,"esp"));     # restore sp
    &vzeroall ();
&function_end_A();
}
}
}}}

&function_end_B("sha256_block_data_order");

&asm_finish();

close STDOUT;