# (Scrape artifact removed: web-viewer page chrome and a fused run of
#  display line numbers preceded the actual source below.)
#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# January 2015
#
# ChaCha20 for x86.
#
# Performance in cycles per byte out of large buffer.
#
#		1xIALU/gcc	4xSSSE3
# Pentium	17.5/+80%
# PIII		14.2/+60%
# P4		18.6/+84%
# Core2		9.56/+89%	4.83
# Westmere	9.50/+45%	3.35
# Sandy Bridge	10.5/+47%	3.20
# Haswell	8.15/+50%	2.83
# Skylake	7.53/+22%	2.75
# Silvermont	17.4/+36%	8.35
# Goldmont	13.4/+40%	4.36
# Sledgehammer	10.2/+54%
# Bulldozer	13.4/+50%	4.38(*)
#
# (*) Bulldozer actually executes 4xXOP code path that delivers 3.55;
# Locate this script's directory so the shared perlasm back-end can be
# loaded relative to it.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

# Last command-line argument names the output file; redirect STDOUT there
# so all generated assembly lands in it.
$output = pop and open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

# Feature gating: $xmm enables the SSSE3 code path (only when the build
# advertises OPENSSL_IA32_SSE2); $ymm additionally enables the XOP path,
# but only if the assembler in use is new enough to encode AVX/XOP.
$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

# GNU as: probe version by assembling an empty file.
$ymm=1 if ($xmm &&
		`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
			=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
		($gasver=$1)>=2.19);	# first version supporting AVX

# nasm (win32n flavour).
$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
		$1>=2.03);		# first version supporting AVX

# MASM (win32 flavour).
$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./ &&
		$1>=10);		# first version supporting AVX

# clang's integrated assembler.
$ymm=1 if ($xmm && !$ymm &&
		`$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/ &&
		$2>=3.0);		# first version supporting AVX

# Register roles for the integer-only code path.  The "_" twins are
# shadow registers that QUARTERROUND rotates in and out so that four
# state words can stay in registers while the rest live on the stack.
$a="eax";
($b,$b_)=("ebx","ebp");
($c,$c_)=("ecx","esi");
($d,$d_)=("edx","edi");
# Emit one ChaCha quarter-round on state words $ai,$bi,$ci,$di (indices
# into the 16-word state kept at esp+0..63).  $i is the quarter-round's
# position within the double round (0..7) and controls how loads for the
# NEXT quarter-round and stores for the PREVIOUS one are interleaved
# with the ALU work — only a,b,c,d of the current round are actually in
# registers at any time.  The first "add a,b" of each round is emitted by
# the caller ("see elsewhere") so it can be overlapped differently.
sub QUARTERROUND {
my ($ai,$bi,$ci,$di,$i)=@_;
my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous

	#	a	b	c	d
	#
	#	0	4	8	12	< even round
	#	1	5	9	13
	#	2	6	10	14
	#	3	7	11	15
	#	0	5	10	15	< odd round
	#	1	6	11	12
	#	2	7	8	13
	#	3	4	9	14

	# Rounds 0,3,4,7 sit at the boundary between even (column) and odd
	# (diagonal) rounds, so the simple +1/-1 neighbour prediction above
	# is wrong there; patch the index sets for those cases.
	if ($i==0) {
            my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==3) {
            my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
	} elsif ($i==4) {
            my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==7) {
            my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
	}

	# The quarter-round proper: a+=b; d^=a; d<<<=16; c+=d; b^=c;
	# b<<<=12; a+=b; d^=a; d<<<=8; c+=d; b^=c; b<<<=7 — with spills of
	# the previous round's words and loads of the next round's words
	# threaded through the dependency chain.
	#&add	($a,$b);			# see elsewhere
	&xor	($d,$a);
	 &mov	(&DWP(4*$cp,"esp"),$c_)		if ($ai>0 && $ai<3);
	&rol	($d,16);
	 &mov	(&DWP(4*$bp,"esp"),$b_)		if ($i!=0);
	&add	($c,$d);
	 &mov	($c_,&DWP(4*$cn,"esp"))		if ($ai>0 && $ai<3);
	&xor	($b,$c);
	 &mov	($d_,&DWP(4*$dn,"esp"))		if ($di!=$dn);
	&rol	($b,12);
	 &mov	($b_,&DWP(4*$bn,"esp"))		if ($i<7);
	 &mov	($b_,&DWP(128,"esp"))		if ($i==7);	# loop counter
	&add	($a,$b);
	&xor	($d,$a);
	&mov	(&DWP(4*$ai,"esp"),$a);
	&rol	($d,8);
	&mov	($a,&DWP(4*$an,"esp"));
	&add	($c,$d);
	&mov	(&DWP(4*$di,"esp"),$d)		if ($di!=$dn);
	&mov	($d_,$d)			if ($di==$dn);
	&xor	($b,$c);
	 &add	($a,$b_)			if ($i<7);	# elsewhere
	&rol	($b,7);

	# Software register rotation: next invocation's b/c/d are this
	# invocation's shadow registers.
	($b,$b_)=($b_,$b);
	($c,$c_)=($c_,$c);
	($d,$d_)=($d_,$d);
}
&static_label("ssse3_shortcut");
&static_label("xop_shortcut");
&static_label("ssse3_data");
&static_label("pic_point");

# void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                     size_t len, const unsigned int key[8],
#                     const unsigned int counter[4]);
#
# Integer-only reference path.  Stack frame (after stack_push(33)):
#   esp+0..63    working 16-word ChaCha state
#   esp+64..127  "static" copy of key material, counter and nonce
#   esp+128      saved round-loop counter
# Dispatches to the SSSE3 code path when CPUID reports FXSR+SSSE3.
&function_begin("ChaCha20_ctr32");
	&xor	("eax","eax");
	&cmp	("eax",&wparam(2));		# len==0?
	&je	(&label("no_data"));
if ($xmm) {
	# PIC-safe probe of OPENSSL_ia32cap_P for the SIMD dispatch.
	&call	(&label("pic_point"));
&set_label("pic_point");
	&blindpop("eax");
	&picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point"));
	&test	(&DWP(0,"ebp"),1<<24);		# test FXSR bit
	&jz	(&label("x86"));
	&test	(&DWP(4,"ebp"),1<<9);		# test SSSE3 bit
	&jz	(&label("x86"));
	&jmp	(&label("ssse3_shortcut"));
&set_label("x86");
}
	&mov	("esi",&wparam(3));		# key
	&mov	("edi",&wparam(4));		# counter and nonce
	&stack_push(33);

	&mov	("eax",&DWP(4*0,"esi"));	# copy key
	&mov	("ebx",&DWP(4*1,"esi"));
	&mov	("ecx",&DWP(4*2,"esi"));
	&mov	("edx",&DWP(4*3,"esi"));
	&mov	(&DWP(64+4*4,"esp"),"eax");
	&mov	(&DWP(64+4*5,"esp"),"ebx");
	&mov	(&DWP(64+4*6,"esp"),"ecx");
	&mov	(&DWP(64+4*7,"esp"),"edx");
	&mov	("eax",&DWP(4*4,"esi"));
	&mov	("ebx",&DWP(4*5,"esi"));
	&mov	("ecx",&DWP(4*6,"esi"));
	&mov	("edx",&DWP(4*7,"esi"));
	&mov	(&DWP(64+4*8,"esp"),"eax");
	&mov	(&DWP(64+4*9,"esp"),"ebx");
	&mov	(&DWP(64+4*10,"esp"),"ecx");
	&mov	(&DWP(64+4*11,"esp"),"edx");
	&mov	("eax",&DWP(4*0,"edi"));	# copy counter and nonce
	&mov	("ebx",&DWP(4*1,"edi"));
	&mov	("ecx",&DWP(4*2,"edi"));
	&mov	("edx",&DWP(4*3,"edi"));
	# Pre-decrement the counter: the per-block setup at "entry"
	# unconditionally increments it before use.
	&sub	("eax",1);
	&mov	(&DWP(64+4*12,"esp"),"eax");
	&mov	(&DWP(64+4*13,"esp"),"ebx");
	&mov	(&DWP(64+4*14,"esp"),"ecx");
	&mov	(&DWP(64+4*15,"esp"),"edx");
	&jmp	(&label("entry"));

&set_label("outer_loop",16);
	# Pointers/len are parked back in the argument slots between
	# 64-byte blocks — all eight GPRs are needed inside the round loop.
	&mov	(&wparam(1),$b);		# save input
	&mov	(&wparam(0),$a);		# save output
	&mov	(&wparam(2),$c);		# save len
&set_label("entry");
	# Rebuild the working state: sigma constants + saved key material.
	&mov	($a,0x61707865);
	&mov	(&DWP(4*1,"esp"),0x3320646e);
	&mov	(&DWP(4*2,"esp"),0x79622d32);
	&mov	(&DWP(4*3,"esp"),0x6b206574);

	&mov	($b, &DWP(64+4*5,"esp"));	# copy key material
	&mov	($b_,&DWP(64+4*6,"esp"));
	&mov	($c, &DWP(64+4*10,"esp"));
	&mov	($c_,&DWP(64+4*11,"esp"));
	&mov	($d, &DWP(64+4*13,"esp"));
	&mov	($d_,&DWP(64+4*14,"esp"));
	&mov	(&DWP(4*5,"esp"),$b);
	&mov	(&DWP(4*6,"esp"),$b_);
	&mov	(&DWP(4*10,"esp"),$c);
	&mov	(&DWP(4*11,"esp"),$c_);
	&mov	(&DWP(4*13,"esp"),$d);
	&mov	(&DWP(4*14,"esp"),$d_);

	&mov	($b, &DWP(64+4*7,"esp"));
	&mov	($d_,&DWP(64+4*15,"esp"));
	&mov	($d, &DWP(64+4*12,"esp"));
	&mov	($b_,&DWP(64+4*4,"esp"));
	&mov	($c, &DWP(64+4*8,"esp"));
	&mov	($c_,&DWP(64+4*9,"esp"));
	&add	($d,1);				# counter value
	&mov	(&DWP(4*7,"esp"),$b);
	&mov	(&DWP(4*15,"esp"),$d_);
	&mov	(&DWP(64+4*12,"esp"),$d);	# save counter value

	&mov	($b,10);			# loop counter
	&jmp	(&label("loop"));

&set_label("loop",16);
	# First "a+=b" of QUARTERROUND(0,...) hoisted here ("elsewhere")
	# so the loop counter can be spilled in its shadow.
	&add	($a,$b_);			# elsewhere
	&mov	(&DWP(128,"esp"),$b);		# save loop counter
	&mov	($b,$b_);
	&QUARTERROUND(0, 4, 8, 12, 0);
	&QUARTERROUND(1, 5, 9, 13, 1);
	&QUARTERROUND(2, 6,10, 14, 2);
	&QUARTERROUND(3, 7,11, 15, 3);
	&QUARTERROUND(0, 5,10, 15, 4);
	&QUARTERROUND(1, 6,11, 12, 5);
	&QUARTERROUND(2, 7, 8, 13, 6);
	&QUARTERROUND(3, 4, 9, 14, 7);
	&dec	($b);
	&jnz	(&label("loop"));

	&mov	($b,&wparam(2));		# load len

	# Feed-forward: add original key material back into the state,
	# then xor with input.  Words held in registers are handled first.
	&add	($a,0x61707865);		# accumulate key material
	&add	($b_,&DWP(64+4*4,"esp"));
	&add	($c, &DWP(64+4*8,"esp"));
	&add	($c_,&DWP(64+4*9,"esp"));

	&cmp	($b,64);
	&jb	(&label("tail"));

	&mov	($b,&wparam(1));		# load input pointer
	&add	($d, &DWP(64+4*12,"esp"));
	&add	($d_,&DWP(64+4*14,"esp"));

	&xor	($a, &DWP(4*0,$b));		# xor with input
	&xor	($b_,&DWP(4*4,$b));
	&mov	(&DWP(4*0,"esp"),$a);
	&mov	($a,&wparam(0));		# load output pointer
	&xor	($c, &DWP(4*8,$b));
	&xor	($c_,&DWP(4*9,$b));
	&xor	($d, &DWP(4*12,$b));
	&xor	($d_,&DWP(4*14,$b));
	&mov	(&DWP(4*4,$a),$b_);		# write output
	&mov	(&DWP(4*8,$a),$c);
	&mov	(&DWP(4*9,$a),$c_);
	&mov	(&DWP(4*12,$a),$d);
	&mov	(&DWP(4*14,$a),$d_);

	# Remaining state words come from the stack in three batches of
	# five, each: accumulate key material, xor input, store output.
	&mov	($b_,&DWP(4*1,"esp"));
	&mov	($c, &DWP(4*2,"esp"));
	&mov	($c_,&DWP(4*3,"esp"));
	&mov	($d, &DWP(4*5,"esp"));
	&mov	($d_,&DWP(4*6,"esp"));
	&add	($b_,0x3320646e);		# accumulate key material
	&add	($c, 0x79622d32);
	&add	($c_,0x6b206574);
	&add	($d, &DWP(64+4*5,"esp"));
	&add	($d_,&DWP(64+4*6,"esp"));
	&xor	($b_,&DWP(4*1,$b));
	&xor	($c, &DWP(4*2,$b));
	&xor	($c_,&DWP(4*3,$b));
	&xor	($d, &DWP(4*5,$b));
	&xor	($d_,&DWP(4*6,$b));
	&mov	(&DWP(4*1,$a),$b_);
	&mov	(&DWP(4*2,$a),$c);
	&mov	(&DWP(4*3,$a),$c_);
	&mov	(&DWP(4*5,$a),$d);
	&mov	(&DWP(4*6,$a),$d_);

	&mov	($b_,&DWP(4*7,"esp"));
	&mov	($c, &DWP(4*10,"esp"));
	&mov	($c_,&DWP(4*11,"esp"));
	&mov	($d, &DWP(4*13,"esp"));
	&mov	($d_,&DWP(4*15,"esp"));
	&add	($b_,&DWP(64+4*7,"esp"));
	&add	($c, &DWP(64+4*10,"esp"));
	&add	($c_,&DWP(64+4*11,"esp"));
	&add	($d, &DWP(64+4*13,"esp"));
	&add	($d_,&DWP(64+4*15,"esp"));
	&xor	($b_,&DWP(4*7,$b));
	&xor	($c, &DWP(4*10,$b));
	&xor	($c_,&DWP(4*11,$b));
	&xor	($d, &DWP(4*13,$b));
	&xor	($d_,&DWP(4*15,$b));
	&lea	($b,&DWP(4*16,$b));		# inp += 64
	&mov	(&DWP(4*7,$a),$b_);
	&mov	($b_,&DWP(4*0,"esp"));
	&mov	(&DWP(4*10,$a),$c);
	&mov	($c,&wparam(2));		# len
	&mov	(&DWP(4*11,$a),$c_);
	&mov	(&DWP(4*13,$a),$d);
	&mov	(&DWP(4*15,$a),$d_);
	&mov	(&DWP(4*0,$a),$b_);
	&lea	($a,&DWP(4*16,$a));		# out += 64
	&sub	($c,64);
	&jnz	(&label("outer_loop"));

	&jmp	(&label("done"));

&set_label("tail");
	# Partial final block (<64 bytes): finish the feed-forward, write
	# the whole keystream block to esp+0..63, then xor byte by byte.
	&add	($d, &DWP(64+4*12,"esp"));
	&add	($d_,&DWP(64+4*14,"esp"));
	&mov	(&DWP(4*0,"esp"),$a);
	&mov	(&DWP(4*4,"esp"),$b_);
	&mov	(&DWP(4*8,"esp"),$c);
	&mov	(&DWP(4*9,"esp"),$c_);
	&mov	(&DWP(4*12,"esp"),$d);
	&mov	(&DWP(4*14,"esp"),$d_);

	&mov	($b_,&DWP(4*1,"esp"));
	&mov	($c, &DWP(4*2,"esp"));
	&mov	($c_,&DWP(4*3,"esp"));
	&mov	($d, &DWP(4*5,"esp"));
	&mov	($d_,&DWP(4*6,"esp"));
	&add	($b_,0x3320646e);		# accumulate key material
	&add	($c, 0x79622d32);
	&add	($c_,0x6b206574);
	&add	($d, &DWP(64+4*5,"esp"));
	&add	($d_,&DWP(64+4*6,"esp"));
	&mov	(&DWP(4*1,"esp"),$b_);
	&mov	(&DWP(4*2,"esp"),$c);
	&mov	(&DWP(4*3,"esp"),$c_);
	&mov	(&DWP(4*5,"esp"),$d);
	&mov	(&DWP(4*6,"esp"),$d_);

	&mov	($b_,&DWP(4*7,"esp"));
	&mov	($c, &DWP(4*10,"esp"));
	&mov	($c_,&DWP(4*11,"esp"));
	&mov	($d, &DWP(4*13,"esp"));
	&mov	($d_,&DWP(4*15,"esp"));
	&add	($b_,&DWP(64+4*7,"esp"));
	&add	($c, &DWP(64+4*10,"esp"));
	&add	($c_,&DWP(64+4*11,"esp"));
	&add	($d, &DWP(64+4*13,"esp"));
	&add	($d_,&DWP(64+4*15,"esp"));
	&mov	(&DWP(4*7,"esp"),$b_);
	&mov	($b_,&wparam(1));		# load input
	&mov	(&DWP(4*10,"esp"),$c);
	&mov	($c,&wparam(0));		# load output
	&mov	(&DWP(4*11,"esp"),$c_);
	&xor	($c_,$c_);			# byte index
	&mov	(&DWP(4*13,"esp"),$d);
	&mov	(&DWP(4*15,"esp"),$d_);

	&xor	("eax","eax");
	&xor	("edx","edx");
&set_label("tail_loop");
	&movb	("al",&BP(0,$c_,$b_));		# input byte
	&movb	("dl",&BP(0,"esp",$c_));	# keystream byte
	&lea	($c_,&DWP(1,$c_));
	&xor	("al","dl");
	&mov	(&BP(-1,$c,$c_),"al");
	&dec	($b);				# $b = remaining len
	&jnz	(&label("tail_loop"));

&set_label("done");
	&stack_pop(33);
&set_label("no_data");
&function_end("ChaCha20_ctr32");
if ($xmm) {
my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
my ($out,$inp,$len)=("edi","esi","ecx");

# SSSE3 analogue of QUARTERROUND: same interleaving scheme, but each xmm
# register carries the same state word of FOUR independent blocks
# ("vertical" layout).  Spilled state lives at ebx-128..ebx+127; eax
# points at the ssse3_data table (rot16/rot8 pshufb masks).
sub QUARTERROUND_SSSE3 {
my ($ai,$bi,$ci,$di,$i)=@_;
my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous

	#	a	b	c	d
	#
	#	0	4	8	12	< even round
	#	1	5	9	13
	#	2	6	10	14
	#	3	7	11	15
	#	0	5	10	15	< odd round
	#	1	6	11	12
	#	2	7	8	13
	#	3	4	9	14

	# Patch next/previous index prediction at the column/diagonal
	# round boundary (see scalar QUARTERROUND).
	if ($i==0) {
            my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==3) {
            my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
	} elsif ($i==4) {
            my $j=4;
	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
	} elsif ($i==7) {
            my $j=0;
	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
	}

	#&paddd	($xa,$xb);			# see elsewhere
	#&pxor	($xd,$xa);			# see elsewhere
	 &movdqa(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
	&pshufb	($xd,&QWP(0,"eax"));		# rot16
	 &movdqa(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
	&paddd	($xc,$xd);
	 &movdqa($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
	&pxor	($xb,$xc);
	 &movdqa($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
	&movdqa	($xa_,$xb);			# borrow as temporary
	&pslld	($xb,12);
	&psrld	($xa_,20);
	&por	($xb,$xa_);			# b <<<= 12
	 &movdqa($xa_,&QWP(16*$an-128,"ebx"));
	&paddd	($xa,$xb);
	 &movdqa($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
	&pxor	($xd,$xa);
	&movdqa	(&QWP(16*$ai-128,"ebx"),$xa);
	&pshufb	($xd,&QWP(16,"eax"));		# rot8
	&paddd	($xc,$xd);
	&movdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
	&movdqa	($xd_,$xd)			if ($di==$dn);
	&pxor	($xb,$xc);
	 &paddd	($xa_,$xb_)			if ($i<7);	# elsewhere
	&movdqa	($xa,$xb);			# borrow as temporary
	&pslld	($xb,7);
	&psrld	($xa,25);
	 &pxor	($xd_,$xa_)			if ($i<7);	# elsewhere
	&por	($xb,$xa);			# b <<<= 7

	($xa,$xa_)=($xa_,$xa);
	($xb,$xb_)=($xb_,$xb);
	($xc,$xc_)=($xc_,$xc);
	($xd,$xd_)=($xd_,$xd);
}

# SSSE3 entry point; same C signature as ChaCha20_ctr32.  Reached via
# ssse3_shortcut with ebp -> OPENSSL_ia32cap_P and eax -> pic_point.
# Frame: esp is aligned down to 64; esp+0..511 holds 4x-block state
# (ebx-/ebp-relative inside the 4x path), esp+512 the saved esp,
# esp+516/520 offloaded key/counter pointers.
&function_begin("ChaCha20_ssse3");
&set_label("ssse3_shortcut");
if ($ymm) {
	&test		(&DWP(4,"ebp"),1<<11);	# test XOP bit
	&jnz		(&label("xop_shortcut"));
}
	&mov		($out,&wparam(0));
	&mov		($inp,&wparam(1));
	&mov		($len,&wparam(2));
	&mov		("edx",&wparam(3));		# key
	&mov		("ebx",&wparam(4));		# counter and nonce

	&mov		("ebp","esp");
	&stack_push	(131);
	&and		("esp",-64);
	&mov		(&DWP(512,"esp"),"ebp");	# saved original esp

	&lea		("eax",&DWP(&label("ssse3_data")."-".
				    &label("pic_point"),"eax"));
	&movdqu		("xmm3",&QWP(0,"ebx"));		# counter and nonce

if (defined($gasver) && $gasver>=2.17) {		# even though we encode
							# pshufb manually, we
							# handle only register
							# operands, while this
							# segment uses memory
							# operand...
	# 4x path: process four 64-byte blocks per iteration; taken only
	# for len >= 256.
	&cmp		($len,64*4);
	&jb		(&label("1x"));

	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
	&mov		(&DWP(512+8,"esp"),"ebx");
	&sub		($len,64*4);			# bias len
	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization

	# Broadcast each input word into its own xmm register and store
	# the "static" key material at ebp-128..; counters get lane
	# offsets 0..3 added (via the +{0,1,2,3} and -four tables).
	&movdqu		("xmm7",&QWP(0,"edx"));		# key
	&pshufd		("xmm0","xmm3",0x00);
	&pshufd		("xmm1","xmm3",0x55);
	&pshufd		("xmm2","xmm3",0xaa);
	&pshufd		("xmm3","xmm3",0xff);
	&paddd		("xmm0",&QWP(16*3,"eax"));	# fix counters
	&pshufd		("xmm4","xmm7",0x00);
	&pshufd		("xmm5","xmm7",0x55);
	&psubd		("xmm0",&QWP(16*4,"eax"));
	&pshufd		("xmm6","xmm7",0xaa);
	&pshufd		("xmm7","xmm7",0xff);
	&movdqa		(&QWP(16*12-128,"ebp"),"xmm0");
	&movdqa		(&QWP(16*13-128,"ebp"),"xmm1");
	&movdqa		(&QWP(16*14-128,"ebp"),"xmm2");
	&movdqa		(&QWP(16*15-128,"ebp"),"xmm3");
	&movdqu		("xmm3",&QWP(16,"edx"));	# key
	&movdqa		(&QWP(16*4-128,"ebp"),"xmm4");
	&movdqa		(&QWP(16*5-128,"ebp"),"xmm5");
	&movdqa		(&QWP(16*6-128,"ebp"),"xmm6");
	&movdqa		(&QWP(16*7-128,"ebp"),"xmm7");
	&movdqa		("xmm7",&QWP(16*2,"eax"));	# sigma
	&lea		("ebx",&DWP(128,"esp"));	# size optimization

	&pshufd		("xmm0","xmm3",0x00);
	&pshufd		("xmm1","xmm3",0x55);
	&pshufd		("xmm2","xmm3",0xaa);
	&pshufd		("xmm3","xmm3",0xff);
	&pshufd		("xmm4","xmm7",0x00);
	&pshufd		("xmm5","xmm7",0x55);
	&pshufd		("xmm6","xmm7",0xaa);
	&pshufd		("xmm7","xmm7",0xff);
	&movdqa		(&QWP(16*8-128,"ebp"),"xmm0");
	&movdqa		(&QWP(16*9-128,"ebp"),"xmm1");
	&movdqa		(&QWP(16*10-128,"ebp"),"xmm2");
	&movdqa		(&QWP(16*11-128,"ebp"),"xmm3");
	&movdqa		(&QWP(16*0-128,"ebp"),"xmm4");
	&movdqa		(&QWP(16*1-128,"ebp"),"xmm5");
	&movdqa		(&QWP(16*2-128,"ebp"),"xmm6");
	&movdqa		(&QWP(16*3-128,"ebp"),"xmm7");
	&lea		($inp,&DWP(128,$inp));		# size optimization
	&lea		($out,&DWP(128,$out));		# size optimization
	&jmp		(&label("outer_loop"));

&set_label("outer_loop",16);
	# Copy static key material (ebp-relative) into the working state
	# (ebx-relative).  Commented-out moves are words already live in
	# registers from the previous iteration.
	#&movdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
	&movdqa		("xmm1",&QWP(16*1-128,"ebp"));
	&movdqa		("xmm2",&QWP(16*2-128,"ebp"));
	&movdqa		("xmm3",&QWP(16*3-128,"ebp"));
	#&movdqa	("xmm4",&QWP(16*4-128,"ebp"));
	&movdqa		("xmm5",&QWP(16*5-128,"ebp"));
	&movdqa		("xmm6",&QWP(16*6-128,"ebp"));
	&movdqa		("xmm7",&QWP(16*7-128,"ebp"));
	#&movdqa	(&QWP(16*0-128,"ebx"),"xmm0");
	&movdqa		(&QWP(16*1-128,"ebx"),"xmm1");
	&movdqa		(&QWP(16*2-128,"ebx"),"xmm2");
	&movdqa		(&QWP(16*3-128,"ebx"),"xmm3");
	#&movdqa	(&QWP(16*4-128,"ebx"),"xmm4");
	&movdqa		(&QWP(16*5-128,"ebx"),"xmm5");
	&movdqa		(&QWP(16*6-128,"ebx"),"xmm6");
	&movdqa		(&QWP(16*7-128,"ebx"),"xmm7");
	#&movdqa	("xmm0",&QWP(16*8-128,"ebp"));
	#&movdqa	("xmm1",&QWP(16*9-128,"ebp"));
	&movdqa		("xmm2",&QWP(16*10-128,"ebp"));
	&movdqa		("xmm3",&QWP(16*11-128,"ebp"));
	&movdqa		("xmm4",&QWP(16*12-128,"ebp"));
	&movdqa		("xmm5",&QWP(16*13-128,"ebp"));
	&movdqa		("xmm6",&QWP(16*14-128,"ebp"));
	&movdqa		("xmm7",&QWP(16*15-128,"ebp"));
	&paddd		("xmm4",&QWP(16*4,"eax"));	# counter value
	#&movdqa	(&QWP(16*8-128,"ebx"),"xmm0");
	#&movdqa	(&QWP(16*9-128,"ebx"),"xmm1");
	&movdqa		(&QWP(16*10-128,"ebx"),"xmm2");
	&movdqa		(&QWP(16*11-128,"ebx"),"xmm3");
	&movdqa		(&QWP(16*12-128,"ebx"),"xmm4");
	&movdqa		(&QWP(16*13-128,"ebx"),"xmm5");
	&movdqa		(&QWP(16*14-128,"ebx"),"xmm6");
	&movdqa		(&QWP(16*15-128,"ebx"),"xmm7");
	&movdqa		(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value

	&movdqa		($xa, &QWP(16*0-128,"ebp"));
	&movdqa		($xd, "xmm4");
	&movdqa		($xb_,&QWP(16*4-128,"ebp"));
	&movdqa		($xc, &QWP(16*8-128,"ebp"));
	&movdqa		($xc_,&QWP(16*9-128,"ebp"));

	&mov		("edx",10);			# loop counter
	&nop		();

&set_label("loop",16);
	# First ops of QUARTERROUND_SSSE3(0,...) hoisted ("elsewhere").
	&paddd		($xa,$xb_);		# elsewhere
	&movdqa		($xb,$xb_);
	&pxor		($xd,$xa);		# elsewhere
	&QUARTERROUND_SSSE3(0, 4, 8, 12, 0);
	&QUARTERROUND_SSSE3(1, 5, 9, 13, 1);
	&QUARTERROUND_SSSE3(2, 6,10, 14, 2);
	&QUARTERROUND_SSSE3(3, 7,11, 15, 3);
	&QUARTERROUND_SSSE3(0, 5,10, 15, 4);
	&QUARTERROUND_SSSE3(1, 6,11, 12, 5);
	&QUARTERROUND_SSSE3(2, 7, 8, 13, 6);
	&QUARTERROUND_SSSE3(3, 4, 9, 14, 7);
	&dec		("edx");
	&jnz		(&label("loop"));

	# Flush the words still in registers so all 16 are on the stack.
	&movdqa		(&QWP(16*4-128,"ebx"),$xb_);
	&movdqa		(&QWP(16*8-128,"ebx"),$xc);
	&movdqa		(&QWP(16*9-128,"ebx"),$xc_);
	&movdqa		(&QWP(16*12-128,"ebx"),$xd);
	&movdqa		(&QWP(16*14-128,"ebx"),$xd_);

my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));

	#&movdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
	&movdqa		($xa1,&QWP(16*1-128,"ebx"));
	&movdqa		($xa2,&QWP(16*2-128,"ebx"));
	&movdqa		($xa3,&QWP(16*3-128,"ebx"));

    # Per 4-word group: feed-forward, transpose the 4x4 dword matrix
    # back to per-block ("horizontal") order, xor with input and store.
    for($i=0;$i<256;$i+=64) {
	&paddd		($xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
	&paddd		($xa1,&QWP($i+16*1-128,"ebp"));
	&paddd		($xa2,&QWP($i+16*2-128,"ebp"));
	&paddd		($xa3,&QWP($i+16*3-128,"ebp"));

	&movdqa		($xt2,$xa0);		# "de-interlace" data
	&punpckldq	($xa0,$xa1);
	&movdqa		($xt3,$xa2);
	&punpckldq	($xa2,$xa3);
	&punpckhdq	($xt2,$xa1);
	&punpckhdq	($xt3,$xa3);
	&movdqa		($xa1,$xa0);
	&punpcklqdq	($xa0,$xa2);		# "a0"
	&movdqa		($xa3,$xt2);
	&punpcklqdq	($xt2,$xt3);		# "a2"
	&punpckhqdq	($xa1,$xa2);		# "a1"
	&punpckhqdq	($xa3,$xt3);		# "a3"
	#($xa2,$xt2)=($xt2,$xa2);

	&movdqu		($xt0,&QWP(64*0-128,$inp));	# load input
	&movdqu		($xt1,&QWP(64*1-128,$inp));
	&movdqu		($xa2,&QWP(64*2-128,$inp));
	&movdqu		($xt3,&QWP(64*3-128,$inp));
	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
	&pxor		($xt0,$xa0);
	&movdqa		($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
	&pxor		($xt1,$xa1);
	&movdqa		($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
	&pxor		($xt2,$xa2);
	&movdqa		($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
	&pxor		($xt3,$xa3);
	&movdqa		($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
	&movdqu		(&QWP(64*0-128,$out),$xt0);	# store output
	&movdqu		(&QWP(64*1-128,$out),$xt1);
	&movdqu		(&QWP(64*2-128,$out),$xt2);
	&movdqu		(&QWP(64*3-128,$out),$xt3);
	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
    }

	&sub		($len,64*4);
	&jnc		(&label("outer_loop"));

	&add		($len,64*4);
	&jz		(&label("done"));

	# <256 bytes left: restore pointers, advance the counter by four
	# and fall through to the 1x path to finish.
	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
	&lea		($inp,&DWP(-128,$inp));
	&mov		("edx",&DWP(512+4,"esp"));
	&lea		($out,&DWP(-128,$out));

	&movd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
	&movdqu		("xmm3",&QWP(0,"ebx"));
	&paddd		("xmm2",&QWP(16*6,"eax"));	# +four
	&pand		("xmm3",&QWP(16*7,"eax"));
	&por		("xmm3","xmm2");		# counter value
}

{
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));

# One whole ChaCha row per register ("horizontal" layout); rows are
# re-aligned between rounds with pshufd, so one call covers a full
# column or diagonal round on a single block.
sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot16);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,20);
	&pslld	($t,12);
	&por	($b,$t);		# b <<<= 12

	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot24);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,25);
	&pslld	($t,7);
	&por	($b,$t);		# b <<<= 7
}

&set_label("1x");
	&movdqa	($a,&QWP(16*2,"eax"));		# sigma
	&movdqu	($b,&QWP(0,"edx"));
	&movdqu	($c,&QWP(16,"edx"));
	#&movdqu	($d,&QWP(0,"ebx"));	# already loaded
	&movdqa	($rot16,&QWP(0,"eax"));
	&movdqa	($rot24,&QWP(16,"eax"));
	&mov	(&DWP(16*3,"esp"),"ebp");

	&movdqa	(&QWP(16*0,"esp"),$a);
	&movdqa	(&QWP(16*1,"esp"),$b);
	&movdqa	(&QWP(16*2,"esp"),$c);
	&movdqa	(&QWP(16*3,"esp"),$d);
	&mov	("edx",10);
	&jmp	(&label("loop1x"));

&set_label("outer1x",16);
	&movdqa	($d,&QWP(16*5,"eax"));		# one
	&movdqa	($a,&QWP(16*0,"esp"));
	&movdqa	($b,&QWP(16*1,"esp"));
	&movdqa	($c,&QWP(16*2,"esp"));
	&paddd	($d,&QWP(16*3,"esp"));		# counter += 1
	&mov	("edx",10);
	&movdqa	(&QWP(16*3,"esp"),$d);
	&jmp	(&label("loop1x"));

&set_label("loop1x",16);
	# Column round, rotate rows into diagonal position, diagonal
	# round, rotate back.
	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b00111001);
	&pshufd	($d,$d,0b10010011);
	&nop	();

	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b10010011);
	&pshufd	($d,$d,0b00111001);

	&dec	("edx");
	&jnz	(&label("loop1x"));

	&paddd	($a,&QWP(16*0,"esp"));		# feed-forward
	&paddd	($b,&QWP(16*1,"esp"));
	&paddd	($c,&QWP(16*2,"esp"));
	&paddd	($d,&QWP(16*3,"esp"));

	&cmp	($len,64);
	&jb	(&label("tail"));

	&movdqu	($t,&QWP(16*0,$inp));
	&movdqu	($t1,&QWP(16*1,$inp));
	&pxor	($a,$t);		# xor with input
	&movdqu	($t,&QWP(16*2,$inp));
	&pxor	($b,$t1);
	&movdqu	($t1,&QWP(16*3,$inp));
	&pxor	($c,$t);
	&pxor	($d,$t1);
	&lea	($inp,&DWP(16*4,$inp));	# inp+=64

	&movdqu	(&QWP(16*0,$out),$a);	# write output
	&movdqu	(&QWP(16*1,$out),$b);
	&movdqu	(&QWP(16*2,$out),$c);
	&movdqu	(&QWP(16*3,$out),$d);
	&lea	($out,&DWP(16*4,$out));	# out+=64

	&sub	($len,64);
	&jnz	(&label("outer1x"));

	&jmp	(&label("done"));

&set_label("tail");
	# <64 bytes left: park the keystream block on the stack and xor
	# the remaining bytes one at a time.
	&movdqa	(&QWP(16*0,"esp"),$a);
	&movdqa	(&QWP(16*1,"esp"),$b);
	&movdqa	(&QWP(16*2,"esp"),$c);
	&movdqa	(&QWP(16*3,"esp"),$d);

	&xor	("eax","eax");
	&xor	("edx","edx");
	&xor	("ebp","ebp");

&set_label("tail_loop");
	&movb	("al",&BP(0,"esp","ebp"));	# keystream byte
	&movb	("dl",&BP(0,$inp,"ebp"));	# input byte
	&lea	("ebp",&DWP(1,"ebp"));
	&xor	("al","dl");
	&movb	(&BP(-1,$out,"ebp"),"al");
	&dec	($len);
	&jnz	(&label("tail_loop"));
}
&set_label("done");
	&mov	("esp",&DWP(512,"esp"));	# restore original esp
&function_end("ChaCha20_ssse3");

# Constant table referenced PC-relative through eax:
#   +0   pshufb mask: rotate each dword left 16
#   +16  pshufb mask: rotate each dword left 8
#   +32  sigma ("expa","nd 3","2-by","te k")
#   +48  lane counter offsets {0,1,2,3}
#   +64  {4,4,4,4}
#   +80  "one" (increment low dword only)
#   +96  "four" (increment low dword by 4)
#   +112 mask preserving nonce words, clearing counter word
&align	(64);
&set_label("ssse3_data");
&data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd);
&data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe);
&data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574);
&data_word(0,1,2,3);
&data_word(4,4,4,4);
&data_word(1,0,0,0);
&data_word(4,0,0,0);
&data_word(0,-1,-1,-1);
&align	(64);
}
&asciz	("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>");
  703. if ($ymm) {
  704. my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
  705. my ($out,$inp,$len)=("edi","esi","ecx");
  706. sub QUARTERROUND_XOP {
  707. my ($ai,$bi,$ci,$di,$i)=@_;
  708. my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next
  709. my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous
  710. # a b c d
  711. #
  712. # 0 4 8 12 < even round
  713. # 1 5 9 13
  714. # 2 6 10 14
  715. # 3 7 11 15
  716. # 0 5 10 15 < odd round
  717. # 1 6 11 12
  718. # 2 7 8 13
  719. # 3 4 9 14
  720. if ($i==0) {
  721. my $j=4;
  722. ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
  723. } elsif ($i==3) {
  724. my $j=0;
  725. ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
  726. } elsif ($i==4) {
  727. my $j=4;
  728. ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
  729. } elsif ($i==7) {
  730. my $j=0;
  731. ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
  732. }
  733. #&vpaddd ($xa,$xa,$xb); # see elsewhere
  734. #&vpxor ($xd,$xd,$xa); # see elsewhere
  735. &vmovdqa (&QWP(16*$cp-128,"ebx"),$xc_) if ($ai>0 && $ai<3);
  736. &vprotd ($xd,$xd,16);
  737. &vmovdqa (&QWP(16*$bp-128,"ebx"),$xb_) if ($i!=0);
  738. &vpaddd ($xc,$xc,$xd);
  739. &vmovdqa ($xc_,&QWP(16*$cn-128,"ebx")) if ($ai>0 && $ai<3);
  740. &vpxor ($xb,$i!=0?$xb:$xb_,$xc);
  741. &vmovdqa ($xa_,&QWP(16*$an-128,"ebx"));
  742. &vprotd ($xb,$xb,12);
  743. &vmovdqa ($xb_,&QWP(16*$bn-128,"ebx")) if ($i<7);
  744. &vpaddd ($xa,$xa,$xb);
  745. &vmovdqa ($xd_,&QWP(16*$dn-128,"ebx")) if ($di!=$dn);
  746. &vpxor ($xd,$xd,$xa);
  747. &vpaddd ($xa_,$xa_,$xb_) if ($i<7); # elsewhere
  748. &vprotd ($xd,$xd,8);
  749. &vmovdqa (&QWP(16*$ai-128,"ebx"),$xa);
  750. &vpaddd ($xc,$xc,$xd);
  751. &vmovdqa (&QWP(16*$di-128,"ebx"),$xd) if ($di!=$dn);
  752. &vpxor ($xb,$xb,$xc);
  753. &vpxor ($xd_,$di==$dn?$xd:$xd_,$xa_) if ($i<7); # elsewhere
  754. &vprotd ($xb,$xb,7);
  755. ($xa,$xa_)=($xa_,$xa);
  756. ($xb,$xb_)=($xb_,$xb);
  757. ($xc,$xc_)=($xc_,$xc);
  758. ($xd,$xd_)=($xd_,$xd);
  759. }
######################################################################
# void ChaCha20_xop(unsigned char *out, const unsigned char *inp,
#                   size_t len, const void *key, const void *counter);
#
# XOP code path: uses 3-operand AVX encodings plus AMD XOP's vprotd,
# which performs each of the ChaCha rotates (16/12/8/7 bits) in a
# single instruction.  Processes four 64-byte blocks in parallel while
# len >= 256, then one block at a time, with a byte-wise tail loop for
# a final partial block.
&function_begin("ChaCha20_xop");
&set_label("xop_shortcut");
	&mov		($out,&wparam(0));		# load argument block
	&mov		($inp,&wparam(1));
	&mov		($len,&wparam(2));
	&mov		("edx",&wparam(3));		# key
	&mov		("ebx",&wparam(4));		# counter and nonce
	&vzeroupper	();

	# Carve out a 64-byte-aligned scratch frame; the incoming esp is
	# kept in ebp and stashed at 512(%esp) for restoration at "done".
	&mov		("ebp","esp");
	&stack_push	(131);
	&and		("esp",-64);
	&mov		(&DWP(512,"esp"),"ebp");

	# NOTE(review): eax is assumed to hold the address of "pic_point"
	# on entry (set up on the shared entry path) -- after this lea it
	# points at the ssse3_data constant table.  TODO confirm against
	# the code above this chunk.
	&lea		("eax",&DWP(&label("ssse3_data")."-".
				    &label("pic_point"),"eax"));
	&vmovdqu	("xmm3",&QWP(0,"ebx"));		# counter and nonce

	&cmp		($len,64*4);			# at least 4 blocks?
	&jb		(&label("1x"));			# no - single-block path

	# 4x interleaved path: state is kept "vertically", one xmm
	# register holding the same state word of four parallel blocks.
	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
	&mov		(&DWP(512+8,"esp"),"ebx");
	&sub		($len,64*4);			# bias len
	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization

	# Broadcast each 32-bit word of counter/nonce and key into its
	# own register and lay all 16 state words out at -128(%ebp).
	&vmovdqu	("xmm7",&QWP(0,"edx"));		# key
	&vpshufd	("xmm0","xmm3",0x00);
	&vpshufd	("xmm1","xmm3",0x55);
	&vpshufd	("xmm2","xmm3",0xaa);
	&vpshufd	("xmm3","xmm3",0xff);
	&vpaddd		("xmm0","xmm0",&QWP(16*3,"eax"));	# fix counters
	&vpshufd	("xmm4","xmm7",0x00);
	&vpshufd	("xmm5","xmm7",0x55);
	&vpsubd		("xmm0","xmm0",&QWP(16*4,"eax"));
	&vpshufd	("xmm6","xmm7",0xaa);
	&vpshufd	("xmm7","xmm7",0xff);
	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm0");
	&vmovdqa	(&QWP(16*13-128,"ebp"),"xmm1");
	&vmovdqa	(&QWP(16*14-128,"ebp"),"xmm2");
	&vmovdqa	(&QWP(16*15-128,"ebp"),"xmm3");
	&vmovdqu	("xmm3",&QWP(16,"edx"));	# key
	&vmovdqa	(&QWP(16*4-128,"ebp"),"xmm4");
	&vmovdqa	(&QWP(16*5-128,"ebp"),"xmm5");
	&vmovdqa	(&QWP(16*6-128,"ebp"),"xmm6");
	&vmovdqa	(&QWP(16*7-128,"ebp"),"xmm7");
	&vmovdqa	("xmm7",&QWP(16*2,"eax"));	# sigma
	&lea		("ebx",&DWP(128,"esp"));	# size optimization
	&vpshufd	("xmm0","xmm3",0x00);
	&vpshufd	("xmm1","xmm3",0x55);
	&vpshufd	("xmm2","xmm3",0xaa);
	&vpshufd	("xmm3","xmm3",0xff);
	&vpshufd	("xmm4","xmm7",0x00);
	&vpshufd	("xmm5","xmm7",0x55);
	&vpshufd	("xmm6","xmm7",0xaa);
	&vpshufd	("xmm7","xmm7",0xff);
	&vmovdqa	(&QWP(16*8-128,"ebp"),"xmm0");
	&vmovdqa	(&QWP(16*9-128,"ebp"),"xmm1");
	&vmovdqa	(&QWP(16*10-128,"ebp"),"xmm2");
	&vmovdqa	(&QWP(16*11-128,"ebp"),"xmm3");
	&vmovdqa	(&QWP(16*0-128,"ebp"),"xmm4");
	&vmovdqa	(&QWP(16*1-128,"ebp"),"xmm5");
	&vmovdqa	(&QWP(16*2-128,"ebp"),"xmm6");
	&vmovdqa	(&QWP(16*3-128,"ebp"),"xmm7");
	&lea		($inp,&DWP(128,$inp));		# size optimization
	&lea		($out,&DWP(128,$out));		# size optimization
	&jmp		(&label("outer_loop"));

&set_label("outer_loop",32);
	# Refresh the working copy of the state at -128(%ebx) from the
	# key material at -128(%ebp).  The commented-out moves are
	# performed "elsewhere": those slots are already live in the
	# registers the round code starts from.
	#&vmovdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
	&vmovdqa	("xmm1",&QWP(16*1-128,"ebp"));
	&vmovdqa	("xmm2",&QWP(16*2-128,"ebp"));
	&vmovdqa	("xmm3",&QWP(16*3-128,"ebp"));
	#&vmovdqa	("xmm4",&QWP(16*4-128,"ebp"));
	&vmovdqa	("xmm5",&QWP(16*5-128,"ebp"));
	&vmovdqa	("xmm6",&QWP(16*6-128,"ebp"));
	&vmovdqa	("xmm7",&QWP(16*7-128,"ebp"));
	#&vmovdqa	(&QWP(16*0-128,"ebx"),"xmm0");
	&vmovdqa	(&QWP(16*1-128,"ebx"),"xmm1");
	&vmovdqa	(&QWP(16*2-128,"ebx"),"xmm2");
	&vmovdqa	(&QWP(16*3-128,"ebx"),"xmm3");
	#&vmovdqa	(&QWP(16*4-128,"ebx"),"xmm4");
	&vmovdqa	(&QWP(16*5-128,"ebx"),"xmm5");
	&vmovdqa	(&QWP(16*6-128,"ebx"),"xmm6");
	&vmovdqa	(&QWP(16*7-128,"ebx"),"xmm7");
	#&vmovdqa	("xmm0",&QWP(16*8-128,"ebp"));
	#&vmovdqa	("xmm1",&QWP(16*9-128,"ebp"));
	&vmovdqa	("xmm2",&QWP(16*10-128,"ebp"));
	&vmovdqa	("xmm3",&QWP(16*11-128,"ebp"));
	&vmovdqa	("xmm4",&QWP(16*12-128,"ebp"));
	&vmovdqa	("xmm5",&QWP(16*13-128,"ebp"));
	&vmovdqa	("xmm6",&QWP(16*14-128,"ebp"));
	&vmovdqa	("xmm7",&QWP(16*15-128,"ebp"));
	&vpaddd		("xmm4","xmm4",&QWP(16*4,"eax"));	# counter value
	#&vmovdqa	(&QWP(16*8-128,"ebx"),"xmm0");
	#&vmovdqa	(&QWP(16*9-128,"ebx"),"xmm1");
	&vmovdqa	(&QWP(16*10-128,"ebx"),"xmm2");
	&vmovdqa	(&QWP(16*11-128,"ebx"),"xmm3");
	&vmovdqa	(&QWP(16*12-128,"ebx"),"xmm4");
	&vmovdqa	(&QWP(16*13-128,"ebx"),"xmm5");
	&vmovdqa	(&QWP(16*14-128,"ebx"),"xmm6");
	&vmovdqa	(&QWP(16*15-128,"ebx"),"xmm7");
	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value

	# Preload the state words the first quarter-round group touches.
	&vmovdqa	($xa, &QWP(16*0-128,"ebp"));
	&vmovdqa	($xd, "xmm4");
	&vmovdqa	($xb_,&QWP(16*4-128,"ebp"));
	&vmovdqa	($xc, &QWP(16*8-128,"ebp"));
	&vmovdqa	($xc_,&QWP(16*9-128,"ebp"));
	&mov		("edx",10);			# loop counter

	&nop		();
&set_label("loop",32);
	# 10 iterations x (4 column + 4 diagonal quarter-rounds) = the
	# standard 20 ChaCha rounds.  The first add/xor pair belongs to
	# QUARTERROUND_XOP(0,...) and is hoisted here ("elsewhere").
	&vpaddd		($xa,$xa,$xb_);			# elsewhere
	&vpxor		($xd,$xd,$xa);			# elsewhere
	&QUARTERROUND_XOP(0, 4, 8, 12, 0);
	&QUARTERROUND_XOP(1, 5, 9, 13, 1);
	&QUARTERROUND_XOP(2, 6,10, 14, 2);
	&QUARTERROUND_XOP(3, 7,11, 15, 3);
	&QUARTERROUND_XOP(0, 5,10, 15, 4);
	&QUARTERROUND_XOP(1, 6,11, 12, 5);
	&QUARTERROUND_XOP(2, 7, 8, 13, 6);
	&QUARTERROUND_XOP(3, 4, 9, 14, 7);
	&dec		("edx");
	&jnz		(&label("loop"));

	# Flush the state words still held in registers back to -128(%ebx).
	&vmovdqa	(&QWP(16*4-128,"ebx"),$xb_);
	&vmovdqa	(&QWP(16*8-128,"ebx"),$xc);
	&vmovdqa	(&QWP(16*9-128,"ebx"),$xc_);
	&vmovdqa	(&QWP(16*12-128,"ebx"),$xd);
	&vmovdqa	(&QWP(16*14-128,"ebx"),$xd_);

my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));

	#&vmovdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
	&vmovdqa	($xa1,&QWP(16*1-128,"ebx"));
	&vmovdqa	($xa2,&QWP(16*2-128,"ebx"));
	&vmovdqa	($xa3,&QWP(16*3-128,"ebx"));

# Four unrolled passes, one per group of four state rows: add the
# original key material back in, transpose the 4x4 dword layout back to
# per-block order, xor with input and store 64 bytes per row group.
for($i=0;$i<256;$i+=64) {
	&vpaddd		($xa0,$xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
	&vpaddd		($xa1,$xa1,&QWP($i+16*1-128,"ebp"));
	&vpaddd		($xa2,$xa2,&QWP($i+16*2-128,"ebp"));
	&vpaddd		($xa3,$xa3,&QWP($i+16*3-128,"ebp"));
	&vpunpckldq	($xt2,$xa0,$xa1);		# "de-interlace" data
	&vpunpckldq	($xt3,$xa2,$xa3);
	&vpunpckhdq	($xa0,$xa0,$xa1);
	&vpunpckhdq	($xa2,$xa2,$xa3);
	&vpunpcklqdq	($xa1,$xt2,$xt3);		# "a0"
	&vpunpckhqdq	($xt2,$xt2,$xt3);		# "a1"
	&vpunpcklqdq	($xt3,$xa0,$xa2);		# "a2"
	&vpunpckhqdq	($xa3,$xa0,$xa2);		# "a3"
	&vpxor		($xt0,$xa1,&QWP(64*0-128,$inp));
	&vpxor		($xt1,$xt2,&QWP(64*1-128,$inp));
	&vpxor		($xt2,$xt3,&QWP(64*2-128,$inp));
	&vpxor		($xt3,$xa3,&QWP(64*3-128,$inp));
	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
	&vmovdqa	($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);	# next row group
	&vmovdqa	($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
	&vmovdqa	($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
	&vmovdqa	($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
	&vmovdqu	(&QWP(64*0-128,$out),$xt0);	# store output
	&vmovdqu	(&QWP(64*1-128,$out),$xt1);
	&vmovdqu	(&QWP(64*2-128,$out),$xt2);
	&vmovdqu	(&QWP(64*3-128,$out),$xt3);
	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
}
	&sub		($len,64*4);
	&jnc		(&label("outer_loop"));

	# Fewer than 4 blocks left: undo the len bias, restore the
	# offloaded pointers, rebuild the counter/nonce vector (counter
	# advanced by four) and fall into the single-block code.
	&add		($len,64*4);
	&jz		(&label("done"));
	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
	&lea		($inp,&DWP(-128,$inp));
	&mov		("edx",&DWP(512+4,"esp"));
	&lea		($out,&DWP(-128,$out));
	&vmovd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
	&vmovdqu	("xmm3",&QWP(0,"ebx"));
	&vpaddd		("xmm2","xmm2",&QWP(16*6,"eax"));# +four
	&vpand		("xmm3","xmm3",&QWP(16*7,"eax"));
	&vpor		("xmm3","xmm3","xmm2");		# counter value
{
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));

# One ChaCha quarter-round applied to all four columns at once, each
# state row held in a single xmm register; XOP's vprotd performs the
# 16/12/8/7-bit left-rotates directly.
sub XOPROUND {
	&vpaddd		($a,$a,$b);			# a += b
	&vpxor		($d,$d,$a);			# d ^= a
	&vprotd		($d,$d,16);			# d <<<= 16

	&vpaddd		($c,$c,$d);			# c += d
	&vpxor		($b,$b,$c);			# b ^= c
	&vprotd		($b,$b,12);			# b <<<= 12

	&vpaddd		($a,$a,$b);			# a += b
	&vpxor		($d,$d,$a);			# d ^= a
	&vprotd		($d,$d,8);			# d <<<= 8

	&vpaddd		($c,$c,$d);			# c += d
	&vpxor		($b,$b,$c);			# b ^= c
	&vprotd		($b,$b,7);			# b <<<= 7
}

&set_label("1x");
	# Single-block path: rows a/b/c/d of the state live in one
	# register each; d (counter/nonce) arrives in xmm3.
	&vmovdqa	($a,&QWP(16*2,"eax"));		# sigma
	&vmovdqu	($b,&QWP(0,"edx"));
	&vmovdqu	($c,&QWP(16,"edx"));
	#&vmovdqu	($d,&QWP(0,"ebx"));		# already loaded
	&vmovdqa	($rot16,&QWP(0,"eax"));
	&vmovdqa	($rot24,&QWP(16,"eax"));
	&mov		(&DWP(16*3,"esp"),"ebp");

	# Save the input state; it is added back after the 20 rounds.
	&vmovdqa	(&QWP(16*0,"esp"),$a);
	&vmovdqa	(&QWP(16*1,"esp"),$b);
	&vmovdqa	(&QWP(16*2,"esp"),$c);
	&vmovdqa	(&QWP(16*3,"esp"),$d);
	&mov		("edx",10);
	&jmp		(&label("loop1x"));

&set_label("outer1x",16);
	# Next block: reload saved state and bump the counter by one.
	&vmovdqa	($d,&QWP(16*5,"eax"));		# one
	&vmovdqa	($a,&QWP(16*0,"esp"));
	&vmovdqa	($b,&QWP(16*1,"esp"));
	&vmovdqa	($c,&QWP(16*2,"esp"));
	&vpaddd		($d,$d,&QWP(16*3,"esp"));
	&mov		("edx",10);
	&vmovdqa	(&QWP(16*3,"esp"),$d);
	&jmp		(&label("loop1x"));

&set_label("loop1x",16);
	# Column round, shuffle rows into diagonal position, diagonal
	# round, shuffle back: 2 rounds per iteration x edx=10 = 20.
	&XOPROUND();
	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b00111001);
	&vpshufd	($d,$d,0b10010011);
	&XOPROUND();
	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b10010011);
	&vpshufd	($d,$d,0b00111001);
	&dec		("edx");
	&jnz		(&label("loop1x"));

	# Add the saved input state back in to form the keystream block.
	&vpaddd		($a,$a,&QWP(16*0,"esp"));
	&vpaddd		($b,$b,&QWP(16*1,"esp"));
	&vpaddd		($c,$c,&QWP(16*2,"esp"));
	&vpaddd		($d,$d,&QWP(16*3,"esp"));

	&cmp		($len,64);
	&jb		(&label("tail"));

	&vpxor		($a,$a,&QWP(16*0,$inp));	# xor with input
	&vpxor		($b,$b,&QWP(16*1,$inp));
	&vpxor		($c,$c,&QWP(16*2,$inp));
	&vpxor		($d,$d,&QWP(16*3,$inp));
	&lea		($inp,&DWP(16*4,$inp));		# inp+=64

	&vmovdqu	(&QWP(16*0,$out),$a);		# write output
	&vmovdqu	(&QWP(16*1,$out),$b);
	&vmovdqu	(&QWP(16*2,$out),$c);
	&vmovdqu	(&QWP(16*3,$out),$d);
	&lea		($out,&DWP(16*4,$out));		# out+=64

	&sub		($len,64);
	&jnz		(&label("outer1x"));
	&jmp		(&label("done"));

&set_label("tail");
	# Partial (<64-byte) final block: spill the keystream to the
	# stack and xor it into the output one byte at a time.
	&vmovdqa	(&QWP(16*0,"esp"),$a);
	&vmovdqa	(&QWP(16*1,"esp"),$b);
	&vmovdqa	(&QWP(16*2,"esp"),$c);
	&vmovdqa	(&QWP(16*3,"esp"),$d);

	&xor		("eax","eax");
	&xor		("edx","edx");
	&xor		("ebp","ebp");			# ebp = byte index

&set_label("tail_loop");
	&movb		("al",&BP(0,"esp","ebp"));	# keystream byte
	&movb		("dl",&BP(0,$inp,"ebp"));	# input byte
	&lea		("ebp",&DWP(1,"ebp"));
	&xor		("al","dl");
	&movb		(&BP(-1,$out,"ebp"),"al");
	&dec		($len);
	&jnz		(&label("tail_loop"));
}
&set_label("done");
	&vzeroupper	();
	&mov		("esp",&DWP(512,"esp"));	# restore original esp
&function_end("ChaCha20_xop");
  1018. }
  1019. &asm_finish();
  1020. close STDOUT or die "error closing STDOUT: $!";