chacha-x86_64.pl

  1. #! /usr/bin/env perl
  2. # Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License 2.0 (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # November 2014
  17. #
  18. # ChaCha20 for x86_64.
  19. #
  20. # December 2016
  21. #
  22. # Add AVX512F code path.
  23. #
  24. # December 2017
  25. #
  26. # Add AVX512VL code path.
  27. #
  28. # Performance in cycles per byte out of large buffer.
  29. #
30. #                  IALU/gcc 4.8(i)  1x/2xSSSE3(ii)  4xSSSE3     NxAVX(v)
31. #
32. # P4               9.48/+99%        -               -
33. # Core2            7.83/+55%        7.90/5.76       4.35
34. # Westmere         7.19/+50%        5.60/4.50       3.00
35. # Sandy Bridge     8.31/+42%        5.45/4.00       2.72
36. # Ivy Bridge       6.71/+46%        5.40/?          2.41
37. # Haswell          5.92/+43%        5.20/3.45       2.42        1.23
38. # Skylake[-X]      5.87/+39%        4.70/3.22       2.31        1.19[0.80(vi)]
39. # Silvermont       12.0/+33%        7.75/6.90       7.03(iii)
40. # Knights L        11.7/-           ?               9.60(iii)   0.80
41. # Goldmont         10.6/+17%        5.10/3.52       3.28
42. # Sledgehammer     7.28/+52%        -               -
43. # Bulldozer        9.66/+28%        9.85/5.35(iv)   3.06(iv)
44. # Ryzen            5.96/+50%        5.19/3.00       2.40        2.09
45. # VIA Nano         10.5/+46%        6.72/6.88       6.05
  46. #
  47. # (i) compared to older gcc 3.x one can observe >2x improvement on
  48. # most platforms;
49. # (ii) 2xSSSE3 is a code path optimized specifically for the 128-byte
50. # inputs used by chacha20_poly1305_tls_cipher; results are EVP-free;
51. # (iii) this is not an optimal result for Atom because of MSROM
52. # limitations; SSE2 can do better, but the gain is considered too
53. # low to justify the [maintenance] effort;
  54. # (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20
  55. # and 4.85 for 128-byte inputs;
  56. # (v) 8xAVX2, 8xAVX512VL or 16xAVX512F, whichever best applicable;
  57. # (vi) even though Skylake-X can execute AVX512F code and deliver 0.57
  58. # cpb in single thread, the corresponding capability is suppressed;
  59. # $output is the last argument if it looks like a file (it has an extension)
  60. # $flavour is the first argument if it doesn't look like a file
  61. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  62. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
  63. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  64. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  65. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  66. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  67. die "can't locate x86_64-xlate.pl";
  68. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  69. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  70. $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
  71. }
  72. if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  73. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
  74. $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
  75. $avx += 1 if ($1==2.11 && $2>=8);
  76. }
  77. if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  78. `ml64 2>&1` =~ /Version ([0-9]+)\./) {
  79. $avx = ($1>=10) + ($1>=11);
  80. }
  81. if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
  82. $avx = ($2>=3.0) + ($2>3.0);
  83. }
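# Roughly speaking, the resulting $avx level gates which SIMD code paths are
# emitted below: 0 - integer and SSSE3 paths only, 1 - additionally the
# AVX/XOP path, 2 - the 8x AVX2 path, 3 - the AVX512F/AVX512VL paths (see the
# "$avx", "$avx>1" and "$avx>2" guards further down).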
  84. open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
  85. or die "can't call $xlate: $!";
  86. *STDOUT=*OUT;
  87. # input parameter block
  88. ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
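# The five arguments correspond to a C prototype along the lines of
#	void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#	                    size_t len, const unsigned int key[8],
#	                    const unsigned int counter[4]);
# with counter[0] being the 32-bit block counter and counter[1..3] the nonce
# (the prototype is not declared in this file; see OpenSSL's internal chacha
# header).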
  89. $code.=<<___;
  90. .text
  91. .extern OPENSSL_ia32cap_P
  92. .section .rodata align=64
  93. .align 64
  94. .Lzero:
  95. .long 0,0,0,0
  96. .Lone:
  97. .long 1,0,0,0
  98. .Linc:
  99. .long 0,1,2,3
  100. .Lfour:
  101. .long 4,4,4,4
  102. .Lincy:
  103. .long 0,2,4,6,1,3,5,7
  104. .Leight:
  105. .long 8,8,8,8,8,8,8,8
  106. .Lrot16:
  107. .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
  108. .Lrot24:
  109. .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
  110. .Ltwoy:
  111. .long 2,0,0,0, 2,0,0,0
  112. .align 64
  113. .Lzeroz:
  114. .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
  115. .Lfourz:
  116. .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
  117. .Lincz:
  118. .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
  119. .Lsixteen:
  120. .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
  121. .Lsigma:
  122. .asciz "expand 32-byte k"
  123. .asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
  124. .previous
  125. ___
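# .Lsigma is the "expand 32-byte k" constant that forms row 0 of the ChaCha
# state matrix (rows 1-2 are the key, row 3 is counter||nonce). .Lrot16 and
# .Lrot24 are pshufb masks: rotating a 32-bit lane by 16 or 24 bits is a pure
# byte permutation, so the SSSE3/AVX code below does it with one pshufb
# instead of a shift/shift/or triple. A minimal plain-Perl sketch of what the
# first lane of .Lrot16 does (illustrative only, never executed by this
# generator):
#
#	my @mask  = (0x2,0x3,0x0,0x1);           # first 4 bytes of .Lrot16
#	my @bytes = (0x78,0x56,0x34,0x12);       # 0x12345678, little-endian
#	my @rot   = map { $bytes[$_] } @mask;    # 0x34,0x12,0x78,0x56
#	printf "%08x\n", unpack("V",pack("C4",@rot));  # 56781234 == rotl 16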
  126. sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
  127. { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  128. my $arg = pop;
  129. $arg = "\$$arg" if ($arg*1 eq $arg);
  130. $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
  131. }
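# With this thunk a call such as &add("%eax","%ebx") appends "add %ebx,%eax"
# to $code: the perlasm arguments are given destination-first and re-emitted
# in AT&T (source,destination) order, and a bare number as the last argument
# is turned into an immediate, so &rol("%eax",16) emits "rol $16,%eax".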
  132. @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
  133. "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
  134. @t=("%esi","%edi");
  135. sub ROUND { # critical path is 24 cycles per round
  136. my ($a0,$b0,$c0,$d0)=@_;
  137. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  138. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  139. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  140. my ($xc,$xc_)=map("\"$_\"",@t);
  141. my @x=map("\"$_\"",@x);
  142. # Consider order in which variables are addressed by their
  143. # index:
  144. #
  145. # a b c d
  146. #
  147. # 0 4 8 12 < even round
  148. # 1 5 9 13
  149. # 2 6 10 14
  150. # 3 7 11 15
  151. # 0 5 10 15 < odd round
  152. # 1 6 11 12
  153. # 2 7 8 13
  154. # 3 4 9 14
  155. #
  156. # 'a', 'b' and 'd's are permanently allocated in registers,
  157. # @x[0..7,12..15], while 'c's are maintained in memory. If
158. # you observe the 'c' column, you'll notice that a pair of 'c's is
159. # invariant between rounds. This means that we have to reload
160. # them once per round, in the middle. This is why you'll see a
161. # bunch of 'c' stores and loads in the middle, but none at
162. # the beginning or end.
163. # Normally instructions would be interleaved to favour in-order
164. # execution. Out-of-order cores generally manage it gracefully,
165. # but not this time, for some reason. As in-order execution
166. # cores are a dying breed and old Atom is the only one around,
167. # the instructions are left uninterleaved. Besides, Atom is better
168. # off executing the 1xSSSE3 code anyway...
  169. (
  170. "&add (@x[$a0],@x[$b0])", # Q1
  171. "&xor (@x[$d0],@x[$a0])",
  172. "&rol (@x[$d0],16)",
  173. "&add (@x[$a1],@x[$b1])", # Q2
  174. "&xor (@x[$d1],@x[$a1])",
  175. "&rol (@x[$d1],16)",
  176. "&add ($xc,@x[$d0])",
  177. "&xor (@x[$b0],$xc)",
  178. "&rol (@x[$b0],12)",
  179. "&add ($xc_,@x[$d1])",
  180. "&xor (@x[$b1],$xc_)",
  181. "&rol (@x[$b1],12)",
  182. "&add (@x[$a0],@x[$b0])",
  183. "&xor (@x[$d0],@x[$a0])",
  184. "&rol (@x[$d0],8)",
  185. "&add (@x[$a1],@x[$b1])",
  186. "&xor (@x[$d1],@x[$a1])",
  187. "&rol (@x[$d1],8)",
  188. "&add ($xc,@x[$d0])",
  189. "&xor (@x[$b0],$xc)",
  190. "&rol (@x[$b0],7)",
  191. "&add ($xc_,@x[$d1])",
  192. "&xor (@x[$b1],$xc_)",
  193. "&rol (@x[$b1],7)",
  194. "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
  195. "&mov (\"4*$c1(%rsp)\",$xc_)",
  196. "&mov ($xc,\"4*$c2(%rsp)\")",
  197. "&mov ($xc_,\"4*$c3(%rsp)\")",
  198. "&add (@x[$a2],@x[$b2])", # Q3
  199. "&xor (@x[$d2],@x[$a2])",
  200. "&rol (@x[$d2],16)",
  201. "&add (@x[$a3],@x[$b3])", # Q4
  202. "&xor (@x[$d3],@x[$a3])",
  203. "&rol (@x[$d3],16)",
  204. "&add ($xc,@x[$d2])",
  205. "&xor (@x[$b2],$xc)",
  206. "&rol (@x[$b2],12)",
  207. "&add ($xc_,@x[$d3])",
  208. "&xor (@x[$b3],$xc_)",
  209. "&rol (@x[$b3],12)",
  210. "&add (@x[$a2],@x[$b2])",
  211. "&xor (@x[$d2],@x[$a2])",
  212. "&rol (@x[$d2],8)",
  213. "&add (@x[$a3],@x[$b3])",
  214. "&xor (@x[$d3],@x[$a3])",
  215. "&rol (@x[$d3],8)",
  216. "&add ($xc,@x[$d2])",
  217. "&xor (@x[$b2],$xc)",
  218. "&rol (@x[$b2],7)",
  219. "&add ($xc_,@x[$d3])",
  220. "&xor (@x[$b3],$xc_)",
  221. "&rol (@x[$b3],7)"
  222. );
  223. }
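# For reference, each add/xor/rol triple above is one step of the standard
# ChaCha20 quarter-round; a minimal plain-Perl sketch of a full quarter-round
# on 32-bit words (illustrative only, never called by this generator):
#
#	sub quarter_round_ref {
#	    my ($a,$b,$c,$d) = @_;
#	    my $rotl = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff };
#	    $a = ($a+$b) & 0xffffffff;  $d = $rotl->($d^$a,16);
#	    $c = ($c+$d) & 0xffffffff;  $b = $rotl->($b^$c,12);
#	    $a = ($a+$b) & 0xffffffff;  $d = $rotl->($d^$a, 8);
#	    $c = ($c+$d) & 0xffffffff;  $b = $rotl->($b^$c, 7);
#	    return ($a,$b,$c,$d);
#	}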
  224. ########################################################################
  225. # Generic code path that handles all lengths on pre-SSSE3 processors.
  226. $code.=<<___;
  227. .globl ChaCha20_ctr32
  228. .type ChaCha20_ctr32,\@function,5
  229. .align 64
  230. ChaCha20_ctr32:
  231. .cfi_startproc
  232. cmp \$0,$len
  233. je .Lno_data
  234. mov OPENSSL_ia32cap_P+4(%rip),%r10
  235. ___
  236. $code.=<<___ if ($avx>2);
  237. bt \$48,%r10 # check for AVX512F
  238. jc .LChaCha20_avx512
  239. test %r10,%r10 # check for AVX512VL
  240. js .LChaCha20_avx512vl
  241. ___
  242. $code.=<<___;
243. test \$`1<<(41-32)`,%r10d # check for SSSE3
  244. jnz .LChaCha20_ssse3
  245. push %rbx
  246. .cfi_push %rbx
  247. push %rbp
  248. .cfi_push %rbp
  249. push %r12
  250. .cfi_push %r12
  251. push %r13
  252. .cfi_push %r13
  253. push %r14
  254. .cfi_push %r14
  255. push %r15
  256. .cfi_push %r15
  257. sub \$64+24,%rsp
  258. .cfi_adjust_cfa_offset 64+24
  259. .Lctr32_body:
  260. #movdqa .Lsigma(%rip),%xmm0
  261. movdqu ($key),%xmm1
  262. movdqu 16($key),%xmm2
  263. movdqu ($counter),%xmm3
  264. movdqa .Lone(%rip),%xmm4
  265. #movdqa %xmm0,4*0(%rsp) # key[0]
  266. movdqa %xmm1,4*4(%rsp) # key[1]
  267. movdqa %xmm2,4*8(%rsp) # key[2]
  268. movdqa %xmm3,4*12(%rsp) # key[3]
  269. mov $len,%rbp # reassign $len
  270. jmp .Loop_outer
  271. .align 32
  272. .Loop_outer:
  273. mov \$0x61707865,@x[0] # 'expa'
  274. mov \$0x3320646e,@x[1] # 'nd 3'
  275. mov \$0x79622d32,@x[2] # '2-by'
  276. mov \$0x6b206574,@x[3] # 'te k'
  277. mov 4*4(%rsp),@x[4]
  278. mov 4*5(%rsp),@x[5]
  279. mov 4*6(%rsp),@x[6]
  280. mov 4*7(%rsp),@x[7]
  281. movd %xmm3,@x[12]
  282. mov 4*13(%rsp),@x[13]
  283. mov 4*14(%rsp),@x[14]
  284. mov 4*15(%rsp),@x[15]
  285. mov %rbp,64+0(%rsp) # save len
  286. mov \$10,%ebp
  287. mov $inp,64+8(%rsp) # save inp
  288. movq %xmm2,%rsi # "@x[8]"
  289. mov $out,64+16(%rsp) # save out
  290. mov %rsi,%rdi
  291. shr \$32,%rdi # "@x[9]"
  292. jmp .Loop
  293. .align 32
  294. .Loop:
  295. ___
  296. foreach (&ROUND (0, 4, 8,12)) { eval; }
  297. foreach (&ROUND (0, 5,10,15)) { eval; }
  298. &dec ("%ebp");
  299. &jnz (".Loop");
  300. $code.=<<___;
  301. mov @t[1],4*9(%rsp) # modulo-scheduled
  302. mov @t[0],4*8(%rsp)
  303. mov 64(%rsp),%rbp # load len
  304. movdqa %xmm2,%xmm1
  305. mov 64+8(%rsp),$inp # load inp
  306. paddd %xmm4,%xmm3 # increment counter
  307. mov 64+16(%rsp),$out # load out
  308. add \$0x61707865,@x[0] # 'expa'
  309. add \$0x3320646e,@x[1] # 'nd 3'
  310. add \$0x79622d32,@x[2] # '2-by'
  311. add \$0x6b206574,@x[3] # 'te k'
  312. add 4*4(%rsp),@x[4]
  313. add 4*5(%rsp),@x[5]
  314. add 4*6(%rsp),@x[6]
  315. add 4*7(%rsp),@x[7]
  316. add 4*12(%rsp),@x[12]
  317. add 4*13(%rsp),@x[13]
  318. add 4*14(%rsp),@x[14]
  319. add 4*15(%rsp),@x[15]
  320. paddd 4*8(%rsp),%xmm1
  321. cmp \$64,%rbp
  322. jb .Ltail
  323. xor 4*0($inp),@x[0] # xor with input
  324. xor 4*1($inp),@x[1]
  325. xor 4*2($inp),@x[2]
  326. xor 4*3($inp),@x[3]
  327. xor 4*4($inp),@x[4]
  328. xor 4*5($inp),@x[5]
  329. xor 4*6($inp),@x[6]
  330. xor 4*7($inp),@x[7]
  331. movdqu 4*8($inp),%xmm0
  332. xor 4*12($inp),@x[12]
  333. xor 4*13($inp),@x[13]
  334. xor 4*14($inp),@x[14]
  335. xor 4*15($inp),@x[15]
  336. lea 4*16($inp),$inp # inp+=64
  337. pxor %xmm1,%xmm0
  338. movdqa %xmm2,4*8(%rsp)
  339. movd %xmm3,4*12(%rsp)
  340. mov @x[0],4*0($out) # write output
  341. mov @x[1],4*1($out)
  342. mov @x[2],4*2($out)
  343. mov @x[3],4*3($out)
  344. mov @x[4],4*4($out)
  345. mov @x[5],4*5($out)
  346. mov @x[6],4*6($out)
  347. mov @x[7],4*7($out)
  348. movdqu %xmm0,4*8($out)
  349. mov @x[12],4*12($out)
  350. mov @x[13],4*13($out)
  351. mov @x[14],4*14($out)
  352. mov @x[15],4*15($out)
  353. lea 4*16($out),$out # out+=64
  354. sub \$64,%rbp
  355. jnz .Loop_outer
  356. jmp .Ldone
  357. .align 16
  358. .Ltail:
  359. mov @x[0],4*0(%rsp)
  360. mov @x[1],4*1(%rsp)
  361. xor %rbx,%rbx
  362. mov @x[2],4*2(%rsp)
  363. mov @x[3],4*3(%rsp)
  364. mov @x[4],4*4(%rsp)
  365. mov @x[5],4*5(%rsp)
  366. mov @x[6],4*6(%rsp)
  367. mov @x[7],4*7(%rsp)
  368. movdqa %xmm1,4*8(%rsp)
  369. mov @x[12],4*12(%rsp)
  370. mov @x[13],4*13(%rsp)
  371. mov @x[14],4*14(%rsp)
  372. mov @x[15],4*15(%rsp)
  373. .Loop_tail:
  374. movzb ($inp,%rbx),%eax
  375. movzb (%rsp,%rbx),%edx
  376. lea 1(%rbx),%rbx
  377. xor %edx,%eax
  378. mov %al,-1($out,%rbx)
  379. dec %rbp
  380. jnz .Loop_tail
  381. .Ldone:
  382. lea 64+24+48(%rsp),%rsi
  383. .cfi_def_cfa %rsi,8
  384. mov -48(%rsi),%r15
  385. .cfi_restore %r15
  386. mov -40(%rsi),%r14
  387. .cfi_restore %r14
  388. mov -32(%rsi),%r13
  389. .cfi_restore %r13
  390. mov -24(%rsi),%r12
  391. .cfi_restore %r12
  392. mov -16(%rsi),%rbp
  393. .cfi_restore %rbp
  394. mov -8(%rsi),%rbx
  395. .cfi_restore %rbx
  396. lea (%rsi),%rsp
  397. .cfi_def_cfa_register %rsp
  398. .Lno_data:
  399. ret
  400. .cfi_endproc
  401. .size ChaCha20_ctr32,.-ChaCha20_ctr32
  402. ___
  403. ########################################################################
  404. # SSSE3 code path that handles shorter lengths
  405. {
  406. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
  407. sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
  408. &paddd ($a,$b);
  409. &pxor ($d,$a);
  410. &pshufb ($d,$rot16);
  411. &paddd ($c,$d);
  412. &pxor ($b,$c);
  413. &movdqa ($t,$b);
  414. &psrld ($b,20);
  415. &pslld ($t,12);
  416. &por ($b,$t);
  417. &paddd ($a,$b);
  418. &pxor ($d,$a);
  419. &pshufb ($d,$rot24);
  420. &paddd ($c,$d);
  421. &pxor ($b,$c);
  422. &movdqa ($t,$b);
  423. &psrld ($b,25);
  424. &pslld ($t,7);
  425. &por ($b,$t);
  426. }
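# SSE2 has no 32-bit rotate instruction, so the 12- and 7-bit rotates above
# are emulated with pslld/psrld/por (x rotl n == (x<<n)|(x>>(32-n))), while
# the 16- and 24-bit rotates are done with a single pshufb using the
# .Lrot16/.Lrot24 byte-shuffle masks.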
  427. my $xframe = $win64 ? 160+8 : 8;
  428. $code.=<<___;
  429. .type ChaCha20_ssse3,\@function,5
  430. .align 32
  431. ChaCha20_ssse3:
  432. .cfi_startproc
  433. .LChaCha20_ssse3:
  434. mov %rsp,%r9 # frame pointer
  435. .cfi_def_cfa_register %r9
  436. ___
  437. $code.=<<___ if ($avx);
  438. test \$`1<<(43-32)`,%r10d
  439. jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
  440. ___
  441. $code.=<<___;
  442. cmp \$128,$len # we might throw away some data,
  443. je .LChaCha20_128
  444. ja .LChaCha20_4x # but overall it won't be slower
  445. .Ldo_sse3_after_all:
  446. sub \$64+$xframe,%rsp
  447. ___
  448. $code.=<<___ if ($win64);
  449. movaps %xmm6,-0x28(%r9)
  450. movaps %xmm7,-0x18(%r9)
  451. .Lssse3_body:
  452. ___
  453. $code.=<<___;
  454. movdqa .Lsigma(%rip),$a
  455. movdqu ($key),$b
  456. movdqu 16($key),$c
  457. movdqu ($counter),$d
  458. movdqa .Lrot16(%rip),$rot16
  459. movdqa .Lrot24(%rip),$rot24
  460. movdqa $a,0x00(%rsp)
  461. movdqa $b,0x10(%rsp)
  462. movdqa $c,0x20(%rsp)
  463. movdqa $d,0x30(%rsp)
  464. mov \$10,$counter # reuse $counter
  465. jmp .Loop_ssse3
  466. .align 32
  467. .Loop_outer_ssse3:
  468. movdqa .Lone(%rip),$d
  469. movdqa 0x00(%rsp),$a
  470. movdqa 0x10(%rsp),$b
  471. movdqa 0x20(%rsp),$c
  472. paddd 0x30(%rsp),$d
  473. mov \$10,$counter
  474. movdqa $d,0x30(%rsp)
  475. jmp .Loop_ssse3
  476. .align 32
  477. .Loop_ssse3:
  478. ___
  479. &SSSE3ROUND();
  480. &pshufd ($c,$c,0b01001110);
  481. &pshufd ($b,$b,0b00111001);
  482. &pshufd ($d,$d,0b10010011);
  483. &nop ();
  484. &SSSE3ROUND();
  485. &pshufd ($c,$c,0b01001110);
  486. &pshufd ($b,$b,0b10010011);
  487. &pshufd ($d,$d,0b00111001);
  488. &dec ($counter);
  489. &jnz (".Loop_ssse3");
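# The pshufd immediates above rotate the 32-bit lanes of $b, $c and $d by
# one, two and three positions respectively, so the second SSSE3ROUND call
# works on the diagonals of the state matrix; the second set of shuffles
# restores column order before the next iteration.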
  490. $code.=<<___;
  491. paddd 0x00(%rsp),$a
  492. paddd 0x10(%rsp),$b
  493. paddd 0x20(%rsp),$c
  494. paddd 0x30(%rsp),$d
  495. cmp \$64,$len
  496. jb .Ltail_ssse3
  497. movdqu 0x00($inp),$t
  498. movdqu 0x10($inp),$t1
  499. pxor $t,$a # xor with input
  500. movdqu 0x20($inp),$t
  501. pxor $t1,$b
  502. movdqu 0x30($inp),$t1
  503. lea 0x40($inp),$inp # inp+=64
  504. pxor $t,$c
  505. pxor $t1,$d
  506. movdqu $a,0x00($out) # write output
  507. movdqu $b,0x10($out)
  508. movdqu $c,0x20($out)
  509. movdqu $d,0x30($out)
  510. lea 0x40($out),$out # out+=64
  511. sub \$64,$len
  512. jnz .Loop_outer_ssse3
  513. jmp .Ldone_ssse3
  514. .align 16
  515. .Ltail_ssse3:
  516. movdqa $a,0x00(%rsp)
  517. movdqa $b,0x10(%rsp)
  518. movdqa $c,0x20(%rsp)
  519. movdqa $d,0x30(%rsp)
  520. xor $counter,$counter
  521. .Loop_tail_ssse3:
  522. movzb ($inp,$counter),%eax
  523. movzb (%rsp,$counter),%ecx
  524. lea 1($counter),$counter
  525. xor %ecx,%eax
  526. mov %al,-1($out,$counter)
  527. dec $len
  528. jnz .Loop_tail_ssse3
  529. .Ldone_ssse3:
  530. ___
  531. $code.=<<___ if ($win64);
  532. movaps -0x28(%r9),%xmm6
  533. movaps -0x18(%r9),%xmm7
  534. ___
  535. $code.=<<___;
  536. lea (%r9),%rsp
  537. .cfi_def_cfa_register %rsp
  538. .Lssse3_epilogue:
  539. ret
  540. .cfi_endproc
  541. .size ChaCha20_ssse3,.-ChaCha20_ssse3
  542. ___
  543. }
  544. ########################################################################
  545. # SSSE3 code path that handles 128-byte inputs
  546. {
  547. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7));
  548. my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1));
  549. sub SSSE3ROUND_2x {
  550. &paddd ($a,$b);
  551. &pxor ($d,$a);
  552. &paddd ($a1,$b1);
  553. &pxor ($d1,$a1);
  554. &pshufb ($d,$rot16);
  555. &pshufb($d1,$rot16);
  556. &paddd ($c,$d);
  557. &paddd ($c1,$d1);
  558. &pxor ($b,$c);
  559. &pxor ($b1,$c1);
  560. &movdqa ($t,$b);
  561. &psrld ($b,20);
  562. &movdqa($t1,$b1);
  563. &pslld ($t,12);
  564. &psrld ($b1,20);
  565. &por ($b,$t);
  566. &pslld ($t1,12);
  567. &por ($b1,$t1);
  568. &paddd ($a,$b);
  569. &pxor ($d,$a);
  570. &paddd ($a1,$b1);
  571. &pxor ($d1,$a1);
  572. &pshufb ($d,$rot24);
  573. &pshufb($d1,$rot24);
  574. &paddd ($c,$d);
  575. &paddd ($c1,$d1);
  576. &pxor ($b,$c);
  577. &pxor ($b1,$c1);
  578. &movdqa ($t,$b);
  579. &psrld ($b,25);
  580. &movdqa($t1,$b1);
  581. &pslld ($t,7);
  582. &psrld ($b1,25);
  583. &por ($b,$t);
  584. &pslld ($t1,7);
  585. &por ($b1,$t1);
  586. }
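# Same quarter-round as SSSE3ROUND, but two independent 64-byte blocks are
# interleaved so that the second block's instructions fill the latency
# bubbles of the first; this is the "2xSSSE3" path from footnote (ii),
# taken only for exactly 128-byte inputs.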
  587. my $xframe = $win64 ? 0x68 : 8;
  588. $code.=<<___;
  589. .type ChaCha20_128,\@function,5
  590. .align 32
  591. ChaCha20_128:
  592. .cfi_startproc
  593. .LChaCha20_128:
  594. mov %rsp,%r9 # frame pointer
  595. .cfi_def_cfa_register %r9
  596. sub \$64+$xframe,%rsp
  597. ___
  598. $code.=<<___ if ($win64);
  599. movaps %xmm6,-0x68(%r9)
  600. movaps %xmm7,-0x58(%r9)
  601. movaps %xmm8,-0x48(%r9)
  602. movaps %xmm9,-0x38(%r9)
  603. movaps %xmm10,-0x28(%r9)
  604. movaps %xmm11,-0x18(%r9)
  605. .L128_body:
  606. ___
  607. $code.=<<___;
  608. movdqa .Lsigma(%rip),$a
  609. movdqu ($key),$b
  610. movdqu 16($key),$c
  611. movdqu ($counter),$d
  612. movdqa .Lone(%rip),$d1
  613. movdqa .Lrot16(%rip),$rot16
  614. movdqa .Lrot24(%rip),$rot24
  615. movdqa $a,$a1
  616. movdqa $a,0x00(%rsp)
  617. movdqa $b,$b1
  618. movdqa $b,0x10(%rsp)
  619. movdqa $c,$c1
  620. movdqa $c,0x20(%rsp)
  621. paddd $d,$d1
  622. movdqa $d,0x30(%rsp)
  623. mov \$10,$counter # reuse $counter
  624. jmp .Loop_128
  625. .align 32
  626. .Loop_128:
  627. ___
  628. &SSSE3ROUND_2x();
  629. &pshufd ($c,$c,0b01001110);
  630. &pshufd ($b,$b,0b00111001);
  631. &pshufd ($d,$d,0b10010011);
  632. &pshufd ($c1,$c1,0b01001110);
  633. &pshufd ($b1,$b1,0b00111001);
  634. &pshufd ($d1,$d1,0b10010011);
  635. &SSSE3ROUND_2x();
  636. &pshufd ($c,$c,0b01001110);
  637. &pshufd ($b,$b,0b10010011);
  638. &pshufd ($d,$d,0b00111001);
  639. &pshufd ($c1,$c1,0b01001110);
  640. &pshufd ($b1,$b1,0b10010011);
  641. &pshufd ($d1,$d1,0b00111001);
  642. &dec ($counter);
  643. &jnz (".Loop_128");
  644. $code.=<<___;
  645. paddd 0x00(%rsp),$a
  646. paddd 0x10(%rsp),$b
  647. paddd 0x20(%rsp),$c
  648. paddd 0x30(%rsp),$d
  649. paddd .Lone(%rip),$d1
  650. paddd 0x00(%rsp),$a1
  651. paddd 0x10(%rsp),$b1
  652. paddd 0x20(%rsp),$c1
  653. paddd 0x30(%rsp),$d1
  654. movdqu 0x00($inp),$t
  655. movdqu 0x10($inp),$t1
  656. pxor $t,$a # xor with input
  657. movdqu 0x20($inp),$t
  658. pxor $t1,$b
  659. movdqu 0x30($inp),$t1
  660. pxor $t,$c
  661. movdqu 0x40($inp),$t
  662. pxor $t1,$d
  663. movdqu 0x50($inp),$t1
  664. pxor $t,$a1
  665. movdqu 0x60($inp),$t
  666. pxor $t1,$b1
  667. movdqu 0x70($inp),$t1
  668. pxor $t,$c1
  669. pxor $t1,$d1
  670. movdqu $a,0x00($out) # write output
  671. movdqu $b,0x10($out)
  672. movdqu $c,0x20($out)
  673. movdqu $d,0x30($out)
  674. movdqu $a1,0x40($out)
  675. movdqu $b1,0x50($out)
  676. movdqu $c1,0x60($out)
  677. movdqu $d1,0x70($out)
  678. ___
  679. $code.=<<___ if ($win64);
  680. movaps -0x68(%r9),%xmm6
  681. movaps -0x58(%r9),%xmm7
  682. movaps -0x48(%r9),%xmm8
  683. movaps -0x38(%r9),%xmm9
  684. movaps -0x28(%r9),%xmm10
  685. movaps -0x18(%r9),%xmm11
  686. ___
  687. $code.=<<___;
  688. lea (%r9),%rsp
  689. .cfi_def_cfa_register %rsp
  690. .L128_epilogue:
  691. ret
  692. .cfi_endproc
  693. .size ChaCha20_128,.-ChaCha20_128
  694. ___
  695. }
  696. ########################################################################
  697. # SSSE3 code path that handles longer messages.
  698. {
  699. # assign variables to favor Atom front-end
  700. my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
  701. $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
  702. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  703. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  704. sub SSSE3_lane_ROUND {
  705. my ($a0,$b0,$c0,$d0)=@_;
  706. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  707. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  708. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  709. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  710. my @x=map("\"$_\"",@xx);
  711. # Consider order in which variables are addressed by their
  712. # index:
  713. #
  714. # a b c d
  715. #
  716. # 0 4 8 12 < even round
  717. # 1 5 9 13
  718. # 2 6 10 14
  719. # 3 7 11 15
  720. # 0 5 10 15 < odd round
  721. # 1 6 11 12
  722. # 2 7 8 13
  723. # 3 4 9 14
  724. #
  725. # 'a', 'b' and 'd's are permanently allocated in registers,
  726. # @x[0..7,12..15], while 'c's are maintained in memory. If
727. # you observe the 'c' column, you'll notice that a pair of 'c's is
728. # invariant between rounds. This means that we have to reload
729. # them once per round, in the middle. This is why you'll see a
730. # bunch of 'c' stores and loads in the middle, but none at
731. # the beginning or end.
  732. (
  733. "&paddd (@x[$a0],@x[$b0])", # Q1
  734. "&paddd (@x[$a1],@x[$b1])", # Q2
  735. "&pxor (@x[$d0],@x[$a0])",
  736. "&pxor (@x[$d1],@x[$a1])",
  737. "&pshufb (@x[$d0],$t1)",
  738. "&pshufb (@x[$d1],$t1)",
  739. "&paddd ($xc,@x[$d0])",
  740. "&paddd ($xc_,@x[$d1])",
  741. "&pxor (@x[$b0],$xc)",
  742. "&pxor (@x[$b1],$xc_)",
  743. "&movdqa ($t0,@x[$b0])",
  744. "&pslld (@x[$b0],12)",
  745. "&psrld ($t0,20)",
  746. "&movdqa ($t1,@x[$b1])",
  747. "&pslld (@x[$b1],12)",
  748. "&por (@x[$b0],$t0)",
  749. "&psrld ($t1,20)",
  750. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  751. "&por (@x[$b1],$t1)",
  752. "&paddd (@x[$a0],@x[$b0])",
  753. "&paddd (@x[$a1],@x[$b1])",
  754. "&pxor (@x[$d0],@x[$a0])",
  755. "&pxor (@x[$d1],@x[$a1])",
  756. "&pshufb (@x[$d0],$t0)",
  757. "&pshufb (@x[$d1],$t0)",
  758. "&paddd ($xc,@x[$d0])",
  759. "&paddd ($xc_,@x[$d1])",
  760. "&pxor (@x[$b0],$xc)",
  761. "&pxor (@x[$b1],$xc_)",
  762. "&movdqa ($t1,@x[$b0])",
  763. "&pslld (@x[$b0],7)",
  764. "&psrld ($t1,25)",
  765. "&movdqa ($t0,@x[$b1])",
  766. "&pslld (@x[$b1],7)",
  767. "&por (@x[$b0],$t1)",
  768. "&psrld ($t0,25)",
  769. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  770. "&por (@x[$b1],$t0)",
  771. "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  772. "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
  773. "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
  774. "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
  775. "&paddd (@x[$a2],@x[$b2])", # Q3
  776. "&paddd (@x[$a3],@x[$b3])", # Q4
  777. "&pxor (@x[$d2],@x[$a2])",
  778. "&pxor (@x[$d3],@x[$a3])",
  779. "&pshufb (@x[$d2],$t1)",
  780. "&pshufb (@x[$d3],$t1)",
  781. "&paddd ($xc,@x[$d2])",
  782. "&paddd ($xc_,@x[$d3])",
  783. "&pxor (@x[$b2],$xc)",
  784. "&pxor (@x[$b3],$xc_)",
  785. "&movdqa ($t0,@x[$b2])",
  786. "&pslld (@x[$b2],12)",
  787. "&psrld ($t0,20)",
  788. "&movdqa ($t1,@x[$b3])",
  789. "&pslld (@x[$b3],12)",
  790. "&por (@x[$b2],$t0)",
  791. "&psrld ($t1,20)",
  792. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  793. "&por (@x[$b3],$t1)",
  794. "&paddd (@x[$a2],@x[$b2])",
  795. "&paddd (@x[$a3],@x[$b3])",
  796. "&pxor (@x[$d2],@x[$a2])",
  797. "&pxor (@x[$d3],@x[$a3])",
  798. "&pshufb (@x[$d2],$t0)",
  799. "&pshufb (@x[$d3],$t0)",
  800. "&paddd ($xc,@x[$d2])",
  801. "&paddd ($xc_,@x[$d3])",
  802. "&pxor (@x[$b2],$xc)",
  803. "&pxor (@x[$b3],$xc_)",
  804. "&movdqa ($t1,@x[$b2])",
  805. "&pslld (@x[$b2],7)",
  806. "&psrld ($t1,25)",
  807. "&movdqa ($t0,@x[$b3])",
  808. "&pslld (@x[$b3],7)",
  809. "&por (@x[$b2],$t1)",
  810. "&psrld ($t0,25)",
  811. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  812. "&por (@x[$b3],$t0)"
  813. );
  814. }
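# In this path every xmm register holds the same state word from four
# independent blocks (the key and counter are "smashed by lanes" below), so
# one pass over SSSE3_lane_ROUND advances four 64-byte blocks at once; the
# punpck* transpose after the loop turns the lane-sliced words back into
# contiguous keystream.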
  815. my $xframe = $win64 ? 0xa8 : 8;
  816. $code.=<<___;
  817. .type ChaCha20_4x,\@function,5
  818. .align 32
  819. ChaCha20_4x:
  820. .cfi_startproc
  821. .LChaCha20_4x:
  822. mov %rsp,%r9 # frame pointer
  823. .cfi_def_cfa_register %r9
  824. mov %r10,%r11
  825. ___
  826. $code.=<<___ if ($avx>1);
  827. shr \$32,%r10 # OPENSSL_ia32cap_P+8
  828. test \$`1<<5`,%r10 # test AVX2
  829. jnz .LChaCha20_8x
  830. ___
  831. $code.=<<___;
  832. cmp \$192,$len
  833. ja .Lproceed4x
  834. and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
  835. cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
  836. je .Ldo_sse3_after_all # to detect Atom
  837. .Lproceed4x:
  838. sub \$0x140+$xframe,%rsp
  839. ___
  840. ################ stack layout
  841. # +0x00 SIMD equivalent of @x[8-12]
  842. # ...
  843. # +0x40 constant copy of key[0-2] smashed by lanes
  844. # ...
  845. # +0x100 SIMD counters (with nonce smashed by lanes)
  846. # ...
  847. # +0x140
  848. $code.=<<___ if ($win64);
  849. movaps %xmm6,-0xa8(%r9)
  850. movaps %xmm7,-0x98(%r9)
  851. movaps %xmm8,-0x88(%r9)
  852. movaps %xmm9,-0x78(%r9)
  853. movaps %xmm10,-0x68(%r9)
  854. movaps %xmm11,-0x58(%r9)
  855. movaps %xmm12,-0x48(%r9)
  856. movaps %xmm13,-0x38(%r9)
  857. movaps %xmm14,-0x28(%r9)
  858. movaps %xmm15,-0x18(%r9)
  859. .L4x_body:
  860. ___
  861. $code.=<<___;
  862. movdqa .Lsigma(%rip),$xa3 # key[0]
  863. movdqu ($key),$xb3 # key[1]
  864. movdqu 16($key),$xt3 # key[2]
  865. movdqu ($counter),$xd3 # key[3]
  866. lea 0x100(%rsp),%rcx # size optimization
  867. lea .Lrot16(%rip),%r10
  868. lea .Lrot24(%rip),%r11
  869. pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  870. pshufd \$0x55,$xa3,$xa1
  871. movdqa $xa0,0x40(%rsp) # ... and offload
  872. pshufd \$0xaa,$xa3,$xa2
  873. movdqa $xa1,0x50(%rsp)
  874. pshufd \$0xff,$xa3,$xa3
  875. movdqa $xa2,0x60(%rsp)
  876. movdqa $xa3,0x70(%rsp)
  877. pshufd \$0x00,$xb3,$xb0
  878. pshufd \$0x55,$xb3,$xb1
  879. movdqa $xb0,0x80-0x100(%rcx)
  880. pshufd \$0xaa,$xb3,$xb2
  881. movdqa $xb1,0x90-0x100(%rcx)
  882. pshufd \$0xff,$xb3,$xb3
  883. movdqa $xb2,0xa0-0x100(%rcx)
  884. movdqa $xb3,0xb0-0x100(%rcx)
  885. pshufd \$0x00,$xt3,$xt0 # "$xc0"
  886. pshufd \$0x55,$xt3,$xt1 # "$xc1"
  887. movdqa $xt0,0xc0-0x100(%rcx)
  888. pshufd \$0xaa,$xt3,$xt2 # "$xc2"
  889. movdqa $xt1,0xd0-0x100(%rcx)
  890. pshufd \$0xff,$xt3,$xt3 # "$xc3"
  891. movdqa $xt2,0xe0-0x100(%rcx)
  892. movdqa $xt3,0xf0-0x100(%rcx)
  893. pshufd \$0x00,$xd3,$xd0
  894. pshufd \$0x55,$xd3,$xd1
  895. paddd .Linc(%rip),$xd0 # don't save counters yet
  896. pshufd \$0xaa,$xd3,$xd2
  897. movdqa $xd1,0x110-0x100(%rcx)
  898. pshufd \$0xff,$xd3,$xd3
  899. movdqa $xd2,0x120-0x100(%rcx)
  900. movdqa $xd3,0x130-0x100(%rcx)
  901. jmp .Loop_enter4x
  902. .align 32
  903. .Loop_outer4x:
  904. movdqa 0x40(%rsp),$xa0 # re-load smashed key
  905. movdqa 0x50(%rsp),$xa1
  906. movdqa 0x60(%rsp),$xa2
  907. movdqa 0x70(%rsp),$xa3
  908. movdqa 0x80-0x100(%rcx),$xb0
  909. movdqa 0x90-0x100(%rcx),$xb1
  910. movdqa 0xa0-0x100(%rcx),$xb2
  911. movdqa 0xb0-0x100(%rcx),$xb3
  912. movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  913. movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  914. movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  915. movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  916. movdqa 0x100-0x100(%rcx),$xd0
  917. movdqa 0x110-0x100(%rcx),$xd1
  918. movdqa 0x120-0x100(%rcx),$xd2
  919. movdqa 0x130-0x100(%rcx),$xd3
  920. paddd .Lfour(%rip),$xd0 # next SIMD counters
  921. .Loop_enter4x:
  922. movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
  923. movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
  924. movdqa (%r10),$xt3 # .Lrot16(%rip)
  925. mov \$10,%eax
  926. movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  927. jmp .Loop4x
  928. .align 32
  929. .Loop4x:
  930. ___
  931. foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
  932. foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
  933. $code.=<<___;
  934. dec %eax
  935. jnz .Loop4x
  936. paddd 0x40(%rsp),$xa0 # accumulate key material
  937. paddd 0x50(%rsp),$xa1
  938. paddd 0x60(%rsp),$xa2
  939. paddd 0x70(%rsp),$xa3
  940. movdqa $xa0,$xt2 # "de-interlace" data
  941. punpckldq $xa1,$xa0
  942. movdqa $xa2,$xt3
  943. punpckldq $xa3,$xa2
  944. punpckhdq $xa1,$xt2
  945. punpckhdq $xa3,$xt3
  946. movdqa $xa0,$xa1
  947. punpcklqdq $xa2,$xa0 # "a0"
  948. movdqa $xt2,$xa3
  949. punpcklqdq $xt3,$xt2 # "a2"
  950. punpckhqdq $xa2,$xa1 # "a1"
  951. punpckhqdq $xt3,$xa3 # "a3"
  952. ___
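# The punpck{l,h}dq/punpck{l,h}qdq sequence above is a 4x4 transpose of
# 32-bit words: on entry $xa0..$xa3 each held word a0..a3 for all four lanes
# (blocks), on exit "a0".."a3" each hold the four a-words of one block, ready
# to be xored against 16 contiguous input bytes. The swap below merely
# renames the Perl variables so later code can keep referring to registers by
# their logical role; the same pattern repeats for the b, c and d rows. In
# plain Perl the transpose is simply (illustrative only):
#
#	my @rows = ([1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]);
#	my @cols = map { my $i=$_; [ map { $_->[$i] } @rows ] } 0..3;  # transpose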
  953. ($xa2,$xt2)=($xt2,$xa2);
  954. $code.=<<___;
  955. paddd 0x80-0x100(%rcx),$xb0
  956. paddd 0x90-0x100(%rcx),$xb1
  957. paddd 0xa0-0x100(%rcx),$xb2
  958. paddd 0xb0-0x100(%rcx),$xb3
  959. movdqa $xa0,0x00(%rsp) # offload $xaN
  960. movdqa $xa1,0x10(%rsp)
  961. movdqa 0x20(%rsp),$xa0 # "xc2"
  962. movdqa 0x30(%rsp),$xa1 # "xc3"
  963. movdqa $xb0,$xt2
  964. punpckldq $xb1,$xb0
  965. movdqa $xb2,$xt3
  966. punpckldq $xb3,$xb2
  967. punpckhdq $xb1,$xt2
  968. punpckhdq $xb3,$xt3
  969. movdqa $xb0,$xb1
  970. punpcklqdq $xb2,$xb0 # "b0"
  971. movdqa $xt2,$xb3
  972. punpcklqdq $xt3,$xt2 # "b2"
  973. punpckhqdq $xb2,$xb1 # "b1"
  974. punpckhqdq $xt3,$xb3 # "b3"
  975. ___
  976. ($xb2,$xt2)=($xt2,$xb2);
  977. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  978. $code.=<<___;
  979. paddd 0xc0-0x100(%rcx),$xc0
  980. paddd 0xd0-0x100(%rcx),$xc1
  981. paddd 0xe0-0x100(%rcx),$xc2
  982. paddd 0xf0-0x100(%rcx),$xc3
  983. movdqa $xa2,0x20(%rsp) # keep offloading $xaN
  984. movdqa $xa3,0x30(%rsp)
  985. movdqa $xc0,$xt2
  986. punpckldq $xc1,$xc0
  987. movdqa $xc2,$xt3
  988. punpckldq $xc3,$xc2
  989. punpckhdq $xc1,$xt2
  990. punpckhdq $xc3,$xt3
  991. movdqa $xc0,$xc1
  992. punpcklqdq $xc2,$xc0 # "c0"
  993. movdqa $xt2,$xc3
  994. punpcklqdq $xt3,$xt2 # "c2"
  995. punpckhqdq $xc2,$xc1 # "c1"
  996. punpckhqdq $xt3,$xc3 # "c3"
  997. ___
  998. ($xc2,$xt2)=($xt2,$xc2);
  999. ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
  1000. $code.=<<___;
  1001. paddd 0x100-0x100(%rcx),$xd0
  1002. paddd 0x110-0x100(%rcx),$xd1
  1003. paddd 0x120-0x100(%rcx),$xd2
  1004. paddd 0x130-0x100(%rcx),$xd3
  1005. movdqa $xd0,$xt2
  1006. punpckldq $xd1,$xd0
  1007. movdqa $xd2,$xt3
  1008. punpckldq $xd3,$xd2
  1009. punpckhdq $xd1,$xt2
  1010. punpckhdq $xd3,$xt3
  1011. movdqa $xd0,$xd1
  1012. punpcklqdq $xd2,$xd0 # "d0"
  1013. movdqa $xt2,$xd3
  1014. punpcklqdq $xt3,$xt2 # "d2"
  1015. punpckhqdq $xd2,$xd1 # "d1"
  1016. punpckhqdq $xt3,$xd3 # "d3"
  1017. ___
  1018. ($xd2,$xt2)=($xt2,$xd2);
  1019. $code.=<<___;
  1020. cmp \$64*4,$len
  1021. jb .Ltail4x
  1022. movdqu 0x00($inp),$xt0 # xor with input
  1023. movdqu 0x10($inp),$xt1
  1024. movdqu 0x20($inp),$xt2
  1025. movdqu 0x30($inp),$xt3
  1026. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1027. pxor $xb0,$xt1
  1028. pxor $xc0,$xt2
  1029. pxor $xd0,$xt3
  1030. movdqu $xt0,0x00($out)
  1031. movdqu 0x40($inp),$xt0
  1032. movdqu $xt1,0x10($out)
  1033. movdqu 0x50($inp),$xt1
  1034. movdqu $xt2,0x20($out)
  1035. movdqu 0x60($inp),$xt2
  1036. movdqu $xt3,0x30($out)
  1037. movdqu 0x70($inp),$xt3
  1038. lea 0x80($inp),$inp # size optimization
  1039. pxor 0x10(%rsp),$xt0
  1040. pxor $xb1,$xt1
  1041. pxor $xc1,$xt2
  1042. pxor $xd1,$xt3
  1043. movdqu $xt0,0x40($out)
  1044. movdqu 0x00($inp),$xt0
  1045. movdqu $xt1,0x50($out)
  1046. movdqu 0x10($inp),$xt1
  1047. movdqu $xt2,0x60($out)
  1048. movdqu 0x20($inp),$xt2
  1049. movdqu $xt3,0x70($out)
  1050. lea 0x80($out),$out # size optimization
  1051. movdqu 0x30($inp),$xt3
  1052. pxor 0x20(%rsp),$xt0
  1053. pxor $xb2,$xt1
  1054. pxor $xc2,$xt2
  1055. pxor $xd2,$xt3
  1056. movdqu $xt0,0x00($out)
  1057. movdqu 0x40($inp),$xt0
  1058. movdqu $xt1,0x10($out)
  1059. movdqu 0x50($inp),$xt1
  1060. movdqu $xt2,0x20($out)
  1061. movdqu 0x60($inp),$xt2
  1062. movdqu $xt3,0x30($out)
  1063. movdqu 0x70($inp),$xt3
  1064. lea 0x80($inp),$inp # inp+=64*4
  1065. pxor 0x30(%rsp),$xt0
  1066. pxor $xb3,$xt1
  1067. pxor $xc3,$xt2
  1068. pxor $xd3,$xt3
  1069. movdqu $xt0,0x40($out)
  1070. movdqu $xt1,0x50($out)
  1071. movdqu $xt2,0x60($out)
  1072. movdqu $xt3,0x70($out)
  1073. lea 0x80($out),$out # out+=64*4
  1074. sub \$64*4,$len
  1075. jnz .Loop_outer4x
  1076. jmp .Ldone4x
  1077. .Ltail4x:
  1078. cmp \$192,$len
  1079. jae .L192_or_more4x
  1080. cmp \$128,$len
  1081. jae .L128_or_more4x
  1082. cmp \$64,$len
  1083. jae .L64_or_more4x
  1084. #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1085. xor %r10,%r10
  1086. #movdqa $xt0,0x00(%rsp)
  1087. movdqa $xb0,0x10(%rsp)
  1088. movdqa $xc0,0x20(%rsp)
  1089. movdqa $xd0,0x30(%rsp)
  1090. jmp .Loop_tail4x
  1091. .align 32
  1092. .L64_or_more4x:
  1093. movdqu 0x00($inp),$xt0 # xor with input
  1094. movdqu 0x10($inp),$xt1
  1095. movdqu 0x20($inp),$xt2
  1096. movdqu 0x30($inp),$xt3
1097. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1098. pxor $xb0,$xt1
  1099. pxor $xc0,$xt2
  1100. pxor $xd0,$xt3
  1101. movdqu $xt0,0x00($out)
  1102. movdqu $xt1,0x10($out)
  1103. movdqu $xt2,0x20($out)
  1104. movdqu $xt3,0x30($out)
  1105. je .Ldone4x
  1106. movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
  1107. lea 0x40($inp),$inp # inp+=64*1
  1108. xor %r10,%r10
  1109. movdqa $xt0,0x00(%rsp)
  1110. movdqa $xb1,0x10(%rsp)
  1111. lea 0x40($out),$out # out+=64*1
  1112. movdqa $xc1,0x20(%rsp)
  1113. sub \$64,$len # len-=64*1
  1114. movdqa $xd1,0x30(%rsp)
  1115. jmp .Loop_tail4x
  1116. .align 32
  1117. .L128_or_more4x:
  1118. movdqu 0x00($inp),$xt0 # xor with input
  1119. movdqu 0x10($inp),$xt1
  1120. movdqu 0x20($inp),$xt2
  1121. movdqu 0x30($inp),$xt3
  1122. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1123. pxor $xb0,$xt1
  1124. pxor $xc0,$xt2
  1125. pxor $xd0,$xt3
  1126. movdqu $xt0,0x00($out)
  1127. movdqu 0x40($inp),$xt0
  1128. movdqu $xt1,0x10($out)
  1129. movdqu 0x50($inp),$xt1
  1130. movdqu $xt2,0x20($out)
  1131. movdqu 0x60($inp),$xt2
  1132. movdqu $xt3,0x30($out)
  1133. movdqu 0x70($inp),$xt3
  1134. pxor 0x10(%rsp),$xt0
  1135. pxor $xb1,$xt1
  1136. pxor $xc1,$xt2
  1137. pxor $xd1,$xt3
  1138. movdqu $xt0,0x40($out)
  1139. movdqu $xt1,0x50($out)
  1140. movdqu $xt2,0x60($out)
  1141. movdqu $xt3,0x70($out)
  1142. je .Ldone4x
  1143. movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
  1144. lea 0x80($inp),$inp # inp+=64*2
  1145. xor %r10,%r10
  1146. movdqa $xt0,0x00(%rsp)
  1147. movdqa $xb2,0x10(%rsp)
  1148. lea 0x80($out),$out # out+=64*2
  1149. movdqa $xc2,0x20(%rsp)
  1150. sub \$128,$len # len-=64*2
  1151. movdqa $xd2,0x30(%rsp)
  1152. jmp .Loop_tail4x
  1153. .align 32
  1154. .L192_or_more4x:
  1155. movdqu 0x00($inp),$xt0 # xor with input
  1156. movdqu 0x10($inp),$xt1
  1157. movdqu 0x20($inp),$xt2
  1158. movdqu 0x30($inp),$xt3
  1159. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1160. pxor $xb0,$xt1
  1161. pxor $xc0,$xt2
  1162. pxor $xd0,$xt3
  1163. movdqu $xt0,0x00($out)
  1164. movdqu 0x40($inp),$xt0
  1165. movdqu $xt1,0x10($out)
  1166. movdqu 0x50($inp),$xt1
  1167. movdqu $xt2,0x20($out)
  1168. movdqu 0x60($inp),$xt2
  1169. movdqu $xt3,0x30($out)
  1170. movdqu 0x70($inp),$xt3
  1171. lea 0x80($inp),$inp # size optimization
  1172. pxor 0x10(%rsp),$xt0
  1173. pxor $xb1,$xt1
  1174. pxor $xc1,$xt2
  1175. pxor $xd1,$xt3
  1176. movdqu $xt0,0x40($out)
  1177. movdqu 0x00($inp),$xt0
  1178. movdqu $xt1,0x50($out)
  1179. movdqu 0x10($inp),$xt1
  1180. movdqu $xt2,0x60($out)
  1181. movdqu 0x20($inp),$xt2
  1182. movdqu $xt3,0x70($out)
  1183. lea 0x80($out),$out # size optimization
  1184. movdqu 0x30($inp),$xt3
  1185. pxor 0x20(%rsp),$xt0
  1186. pxor $xb2,$xt1
  1187. pxor $xc2,$xt2
  1188. pxor $xd2,$xt3
  1189. movdqu $xt0,0x00($out)
  1190. movdqu $xt1,0x10($out)
  1191. movdqu $xt2,0x20($out)
  1192. movdqu $xt3,0x30($out)
  1193. je .Ldone4x
  1194. movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
  1195. lea 0x40($inp),$inp # inp+=64*3
  1196. xor %r10,%r10
  1197. movdqa $xt0,0x00(%rsp)
  1198. movdqa $xb3,0x10(%rsp)
  1199. lea 0x40($out),$out # out+=64*3
  1200. movdqa $xc3,0x20(%rsp)
  1201. sub \$192,$len # len-=64*3
  1202. movdqa $xd3,0x30(%rsp)
  1203. .Loop_tail4x:
  1204. movzb ($inp,%r10),%eax
  1205. movzb (%rsp,%r10),%ecx
  1206. lea 1(%r10),%r10
  1207. xor %ecx,%eax
  1208. mov %al,-1($out,%r10)
  1209. dec $len
  1210. jnz .Loop_tail4x
  1211. .Ldone4x:
  1212. ___
  1213. $code.=<<___ if ($win64);
  1214. movaps -0xa8(%r9),%xmm6
  1215. movaps -0x98(%r9),%xmm7
  1216. movaps -0x88(%r9),%xmm8
  1217. movaps -0x78(%r9),%xmm9
  1218. movaps -0x68(%r9),%xmm10
  1219. movaps -0x58(%r9),%xmm11
  1220. movaps -0x48(%r9),%xmm12
  1221. movaps -0x38(%r9),%xmm13
  1222. movaps -0x28(%r9),%xmm14
  1223. movaps -0x18(%r9),%xmm15
  1224. ___
  1225. $code.=<<___;
  1226. lea (%r9),%rsp
  1227. .cfi_def_cfa_register %rsp
  1228. .L4x_epilogue:
  1229. ret
  1230. .cfi_endproc
  1231. .size ChaCha20_4x,.-ChaCha20_4x
  1232. ___
  1233. }
  1234. ########################################################################
  1235. # XOP code path that handles all lengths.
  1236. if ($avx) {
1237. # There is some "anomaly" observed depending on instruction size or
1238. # alignment. If you look closely at the code below you'll notice that
1239. # the argument order sometimes varies. The order affects instruction
1240. # encoding by making it larger, and such fiddling gives a 5% performance
1241. # improvement. This is on FX-4100...
  1242. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1243. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
  1244. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1245. $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
  1246. sub XOP_lane_ROUND {
  1247. my ($a0,$b0,$c0,$d0)=@_;
  1248. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1249. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1250. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1251. my @x=map("\"$_\"",@xx);
  1252. (
  1253. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1254. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1255. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1256. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1257. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1258. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1259. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1260. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1261. "&vprotd (@x[$d0],@x[$d0],16)",
  1262. "&vprotd (@x[$d1],@x[$d1],16)",
  1263. "&vprotd (@x[$d2],@x[$d2],16)",
  1264. "&vprotd (@x[$d3],@x[$d3],16)",
  1265. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1266. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1267. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1268. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1269. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1270. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1271. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1272. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1273. "&vprotd (@x[$b0],@x[$b0],12)",
  1274. "&vprotd (@x[$b1],@x[$b1],12)",
  1275. "&vprotd (@x[$b2],@x[$b2],12)",
  1276. "&vprotd (@x[$b3],@x[$b3],12)",
  1277. "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
  1278. "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
  1279. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1280. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1281. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1282. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1283. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1284. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1285. "&vprotd (@x[$d0],@x[$d0],8)",
  1286. "&vprotd (@x[$d1],@x[$d1],8)",
  1287. "&vprotd (@x[$d2],@x[$d2],8)",
  1288. "&vprotd (@x[$d3],@x[$d3],8)",
  1289. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1290. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1291. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1292. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1293. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1294. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1295. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1296. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1297. "&vprotd (@x[$b0],@x[$b0],7)",
  1298. "&vprotd (@x[$b1],@x[$b1],7)",
  1299. "&vprotd (@x[$b2],@x[$b2],7)",
  1300. "&vprotd (@x[$b3],@x[$b3],7)"
  1301. );
  1302. }
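# Note that XOP's vprotd rotates every dword in a single instruction,
# roughly dst = (src << n) | (src >> (32-n)) per 32-bit lane, so unlike
# the SSSE3/AVX2 paths no .Lrot16/.Lrot24 pshufb masks and no
# shift/shift/or sequences are needed in this code path.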
  1303. my $xframe = $win64 ? 0xa8 : 8;
  1304. $code.=<<___;
  1305. .type ChaCha20_4xop,\@function,5
  1306. .align 32
  1307. ChaCha20_4xop:
  1308. .cfi_startproc
  1309. .LChaCha20_4xop:
  1310. mov %rsp,%r9 # frame pointer
  1311. .cfi_def_cfa_register %r9
  1312. sub \$0x140+$xframe,%rsp
  1313. ___
  1314. ################ stack layout
  1315. # +0x00 SIMD equivalent of @x[8-12]
  1316. # ...
  1317. # +0x40 constant copy of key[0-2] smashed by lanes
  1318. # ...
  1319. # +0x100 SIMD counters (with nonce smashed by lanes)
  1320. # ...
  1321. # +0x140
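# The "size optimization" remarks below come down to x86 displacement
# encoding: offsets in [-128,127] fit a one-byte disp8, anything larger
# needs four bytes.  Pointing %rcx at %rsp+0x100 lets the key/counter
# area be reached with small (mostly negative) offsets, e.g.
# 0x80-0x100(%rcx) is the same byte as 0x80(%rsp).  A tiny sketch of the
# arithmetic (hypothetical value, illustration only):
#
#   my $rsp = 0x7fff_ff00;
#   my $rcx = $rsp + 0x100;
#   printf "%#x\n", $rcx + (0x80 - 0x100);       # prints $rsp + 0x80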
  1322. $code.=<<___ if ($win64);
  1323. movaps %xmm6,-0xa8(%r9)
  1324. movaps %xmm7,-0x98(%r9)
  1325. movaps %xmm8,-0x88(%r9)
  1326. movaps %xmm9,-0x78(%r9)
  1327. movaps %xmm10,-0x68(%r9)
  1328. movaps %xmm11,-0x58(%r9)
  1329. movaps %xmm12,-0x48(%r9)
  1330. movaps %xmm13,-0x38(%r9)
  1331. movaps %xmm14,-0x28(%r9)
  1332. movaps %xmm15,-0x18(%r9)
  1333. .L4xop_body:
  1334. ___
  1335. $code.=<<___;
  1336. vzeroupper
  1337. vmovdqa .Lsigma(%rip),$xa3 # key[0]
  1338. vmovdqu ($key),$xb3 # key[1]
  1339. vmovdqu 16($key),$xt3 # key[2]
  1340. vmovdqu ($counter),$xd3 # key[3]
  1341. lea 0x100(%rsp),%rcx # size optimization
  1342. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1343. vpshufd \$0x55,$xa3,$xa1
  1344. vmovdqa $xa0,0x40(%rsp) # ... and offload
  1345. vpshufd \$0xaa,$xa3,$xa2
  1346. vmovdqa $xa1,0x50(%rsp)
  1347. vpshufd \$0xff,$xa3,$xa3
  1348. vmovdqa $xa2,0x60(%rsp)
  1349. vmovdqa $xa3,0x70(%rsp)
  1350. vpshufd \$0x00,$xb3,$xb0
  1351. vpshufd \$0x55,$xb3,$xb1
  1352. vmovdqa $xb0,0x80-0x100(%rcx)
  1353. vpshufd \$0xaa,$xb3,$xb2
  1354. vmovdqa $xb1,0x90-0x100(%rcx)
  1355. vpshufd \$0xff,$xb3,$xb3
  1356. vmovdqa $xb2,0xa0-0x100(%rcx)
  1357. vmovdqa $xb3,0xb0-0x100(%rcx)
  1358. vpshufd \$0x00,$xt3,$xt0 # "$xc0"
  1359. vpshufd \$0x55,$xt3,$xt1 # "$xc1"
  1360. vmovdqa $xt0,0xc0-0x100(%rcx)
  1361. vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
  1362. vmovdqa $xt1,0xd0-0x100(%rcx)
  1363. vpshufd \$0xff,$xt3,$xt3 # "$xc3"
  1364. vmovdqa $xt2,0xe0-0x100(%rcx)
  1365. vmovdqa $xt3,0xf0-0x100(%rcx)
  1366. vpshufd \$0x00,$xd3,$xd0
  1367. vpshufd \$0x55,$xd3,$xd1
  1368. vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
  1369. vpshufd \$0xaa,$xd3,$xd2
  1370. vmovdqa $xd1,0x110-0x100(%rcx)
  1371. vpshufd \$0xff,$xd3,$xd3
  1372. vmovdqa $xd2,0x120-0x100(%rcx)
  1373. vmovdqa $xd3,0x130-0x100(%rcx)
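# A worked example of the "smash by lanes" step above: vpshufd with
# immediates 0x00/0x55/0xaa/0xff broadcasts dword 0/1/2/3 of the source
# across all four lanes, so each of the four parallel blocks sees the
# same key word.  The only per-lane difference is the counter: adding
# .Linc (per-lane increments 0..3, defined with the other constants
# earlier in this file) turns a broadcast counter n into
#
#   lane:     0     1     2     3
#   counter:  n    n+1   n+2   n+3
#
# so each iteration of the outer loop encrypts four consecutive blocks.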
  1374. jmp .Loop_enter4xop
  1375. .align 32
  1376. .Loop_outer4xop:
  1377. vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
  1378. vmovdqa 0x50(%rsp),$xa1
  1379. vmovdqa 0x60(%rsp),$xa2
  1380. vmovdqa 0x70(%rsp),$xa3
  1381. vmovdqa 0x80-0x100(%rcx),$xb0
  1382. vmovdqa 0x90-0x100(%rcx),$xb1
  1383. vmovdqa 0xa0-0x100(%rcx),$xb2
  1384. vmovdqa 0xb0-0x100(%rcx),$xb3
  1385. vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  1386. vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  1387. vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  1388. vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  1389. vmovdqa 0x100-0x100(%rcx),$xd0
  1390. vmovdqa 0x110-0x100(%rcx),$xd1
  1391. vmovdqa 0x120-0x100(%rcx),$xd2
  1392. vmovdqa 0x130-0x100(%rcx),$xd3
  1393. vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
  1394. .Loop_enter4xop:
  1395. mov \$10,%eax
  1396. vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  1397. jmp .Loop4xop
  1398. .align 32
  1399. .Loop4xop:
  1400. ___
  1401. foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
  1402. foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
  1403. $code.=<<___;
  1404. dec %eax
  1405. jnz .Loop4xop
  1406. vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
  1407. vpaddd 0x50(%rsp),$xa1,$xa1
  1408. vpaddd 0x60(%rsp),$xa2,$xa2
  1409. vpaddd 0x70(%rsp),$xa3,$xa3
  1410. vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
  1411. vmovdqa $xt3,0x30(%rsp)
  1412. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1413. vpunpckldq $xa3,$xa2,$xt3
  1414. vpunpckhdq $xa1,$xa0,$xa0
  1415. vpunpckhdq $xa3,$xa2,$xa2
  1416. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1417. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1418. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1419. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1420. ___
  1421. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
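# A worked example of the punpck "de-interlace" above.  Before the
# shuffle each register holds one state word for all four lanes, e.g.
# $xa0 = {L0.w0, L1.w0, L2.w0, L3.w0}.  The dword unpacks produce
# {L0.w0, L0.w1, L1.w0, L1.w1} and {L0.w2, L0.w3, L1.w2, L1.w3}, and the
# qword unpacks then give "a0" = {L0.w0, L0.w1, L0.w2, L0.w3}, i.e. 16
# contiguous output bytes of a single block, which is what the
# sequential stores below rely on.  The rename on the preceding line
# only restores the natural a0..a3 labels after the shuffle.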
  1422. $code.=<<___;
  1423. vpaddd 0x80-0x100(%rcx),$xb0,$xb0
  1424. vpaddd 0x90-0x100(%rcx),$xb1,$xb1
  1425. vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
  1426. vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
  1427. vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
  1428. vmovdqa $xa1,0x10(%rsp)
  1429. vmovdqa 0x20(%rsp),$xa0 # "xc2"
  1430. vmovdqa 0x30(%rsp),$xa1 # "xc3"
  1431. vpunpckldq $xb1,$xb0,$xt2
  1432. vpunpckldq $xb3,$xb2,$xt3
  1433. vpunpckhdq $xb1,$xb0,$xb0
  1434. vpunpckhdq $xb3,$xb2,$xb2
  1435. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1436. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1437. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1438. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1439. ___
  1440. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1441. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  1442. $code.=<<___;
  1443. vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
  1444. vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
  1445. vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
  1446. vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
  1447. vpunpckldq $xc1,$xc0,$xt2
  1448. vpunpckldq $xc3,$xc2,$xt3
  1449. vpunpckhdq $xc1,$xc0,$xc0
  1450. vpunpckhdq $xc3,$xc2,$xc2
  1451. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1452. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1453. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1454. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1455. ___
  1456. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1457. $code.=<<___;
  1458. vpaddd 0x100-0x100(%rcx),$xd0,$xd0
  1459. vpaddd 0x110-0x100(%rcx),$xd1,$xd1
  1460. vpaddd 0x120-0x100(%rcx),$xd2,$xd2
  1461. vpaddd 0x130-0x100(%rcx),$xd3,$xd3
  1462. vpunpckldq $xd1,$xd0,$xt2
  1463. vpunpckldq $xd3,$xd2,$xt3
  1464. vpunpckhdq $xd1,$xd0,$xd0
  1465. vpunpckhdq $xd3,$xd2,$xd2
  1466. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1467. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1468. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1469. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1470. ___
  1471. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1472. ($xa0,$xa1)=($xt2,$xt3);
  1473. $code.=<<___;
  1474. vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
  1475. vmovdqa 0x10(%rsp),$xa1
  1476. cmp \$64*4,$len
  1477. jb .Ltail4xop
  1478. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1479. vpxor 0x10($inp),$xb0,$xb0
  1480. vpxor 0x20($inp),$xc0,$xc0
  1481. vpxor 0x30($inp),$xd0,$xd0
  1482. vpxor 0x40($inp),$xa1,$xa1
  1483. vpxor 0x50($inp),$xb1,$xb1
  1484. vpxor 0x60($inp),$xc1,$xc1
  1485. vpxor 0x70($inp),$xd1,$xd1
  1486. lea 0x80($inp),$inp # size optimization
  1487. vpxor 0x00($inp),$xa2,$xa2
  1488. vpxor 0x10($inp),$xb2,$xb2
  1489. vpxor 0x20($inp),$xc2,$xc2
  1490. vpxor 0x30($inp),$xd2,$xd2
  1491. vpxor 0x40($inp),$xa3,$xa3
  1492. vpxor 0x50($inp),$xb3,$xb3
  1493. vpxor 0x60($inp),$xc3,$xc3
  1494. vpxor 0x70($inp),$xd3,$xd3
  1495. lea 0x80($inp),$inp # inp+=64*4
  1496. vmovdqu $xa0,0x00($out)
  1497. vmovdqu $xb0,0x10($out)
  1498. vmovdqu $xc0,0x20($out)
  1499. vmovdqu $xd0,0x30($out)
  1500. vmovdqu $xa1,0x40($out)
  1501. vmovdqu $xb1,0x50($out)
  1502. vmovdqu $xc1,0x60($out)
  1503. vmovdqu $xd1,0x70($out)
  1504. lea 0x80($out),$out # size optimization
  1505. vmovdqu $xa2,0x00($out)
  1506. vmovdqu $xb2,0x10($out)
  1507. vmovdqu $xc2,0x20($out)
  1508. vmovdqu $xd2,0x30($out)
  1509. vmovdqu $xa3,0x40($out)
  1510. vmovdqu $xb3,0x50($out)
  1511. vmovdqu $xc3,0x60($out)
  1512. vmovdqu $xd3,0x70($out)
  1513. lea 0x80($out),$out # out+=64*4
  1514. sub \$64*4,$len
  1515. jnz .Loop_outer4xop
  1516. jmp .Ldone4xop
  1517. .align 32
  1518. .Ltail4xop:
  1519. cmp \$192,$len
  1520. jae .L192_or_more4xop
  1521. cmp \$128,$len
  1522. jae .L128_or_more4xop
  1523. cmp \$64,$len
  1524. jae .L64_or_more4xop
  1525. xor %r10,%r10
  1526. vmovdqa $xa0,0x00(%rsp)
  1527. vmovdqa $xb0,0x10(%rsp)
  1528. vmovdqa $xc0,0x20(%rsp)
  1529. vmovdqa $xd0,0x30(%rsp)
  1530. jmp .Loop_tail4xop
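# Tail handling: the unused keystream block is parked on the stack and
# combined with the remaining input one byte at a time in .Loop_tail4xop.
# A rough sketch of that loop (pseudocode, not generated):
#
#   for i in 0 .. left-1:
#       out[i] = in[i] xor keystream[i]          # keystream = block at (%rsp)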
  1531. .align 32
  1532. .L64_or_more4xop:
  1533. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1534. vpxor 0x10($inp),$xb0,$xb0
  1535. vpxor 0x20($inp),$xc0,$xc0
  1536. vpxor 0x30($inp),$xd0,$xd0
  1537. vmovdqu $xa0,0x00($out)
  1538. vmovdqu $xb0,0x10($out)
  1539. vmovdqu $xc0,0x20($out)
  1540. vmovdqu $xd0,0x30($out)
  1541. je .Ldone4xop
  1542. lea 0x40($inp),$inp # inp+=64*1
  1543. vmovdqa $xa1,0x00(%rsp)
  1544. xor %r10,%r10
  1545. vmovdqa $xb1,0x10(%rsp)
  1546. lea 0x40($out),$out # out+=64*1
  1547. vmovdqa $xc1,0x20(%rsp)
  1548. sub \$64,$len # len-=64*1
  1549. vmovdqa $xd1,0x30(%rsp)
  1550. jmp .Loop_tail4xop
  1551. .align 32
  1552. .L128_or_more4xop:
  1553. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1554. vpxor 0x10($inp),$xb0,$xb0
  1555. vpxor 0x20($inp),$xc0,$xc0
  1556. vpxor 0x30($inp),$xd0,$xd0
  1557. vpxor 0x40($inp),$xa1,$xa1
  1558. vpxor 0x50($inp),$xb1,$xb1
  1559. vpxor 0x60($inp),$xc1,$xc1
  1560. vpxor 0x70($inp),$xd1,$xd1
  1561. vmovdqu $xa0,0x00($out)
  1562. vmovdqu $xb0,0x10($out)
  1563. vmovdqu $xc0,0x20($out)
  1564. vmovdqu $xd0,0x30($out)
  1565. vmovdqu $xa1,0x40($out)
  1566. vmovdqu $xb1,0x50($out)
  1567. vmovdqu $xc1,0x60($out)
  1568. vmovdqu $xd1,0x70($out)
  1569. je .Ldone4xop
  1570. lea 0x80($inp),$inp # inp+=64*2
  1571. vmovdqa $xa2,0x00(%rsp)
  1572. xor %r10,%r10
  1573. vmovdqa $xb2,0x10(%rsp)
  1574. lea 0x80($out),$out # out+=64*2
  1575. vmovdqa $xc2,0x20(%rsp)
  1576. sub \$128,$len # len-=64*2
  1577. vmovdqa $xd2,0x30(%rsp)
  1578. jmp .Loop_tail4xop
  1579. .align 32
  1580. .L192_or_more4xop:
  1581. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1582. vpxor 0x10($inp),$xb0,$xb0
  1583. vpxor 0x20($inp),$xc0,$xc0
  1584. vpxor 0x30($inp),$xd0,$xd0
  1585. vpxor 0x40($inp),$xa1,$xa1
  1586. vpxor 0x50($inp),$xb1,$xb1
  1587. vpxor 0x60($inp),$xc1,$xc1
  1588. vpxor 0x70($inp),$xd1,$xd1
  1589. lea 0x80($inp),$inp # size optimization
  1590. vpxor 0x00($inp),$xa2,$xa2
  1591. vpxor 0x10($inp),$xb2,$xb2
  1592. vpxor 0x20($inp),$xc2,$xc2
  1593. vpxor 0x30($inp),$xd2,$xd2
  1594. vmovdqu $xa0,0x00($out)
  1595. vmovdqu $xb0,0x10($out)
  1596. vmovdqu $xc0,0x20($out)
  1597. vmovdqu $xd0,0x30($out)
  1598. vmovdqu $xa1,0x40($out)
  1599. vmovdqu $xb1,0x50($out)
  1600. vmovdqu $xc1,0x60($out)
  1601. vmovdqu $xd1,0x70($out)
  1602. lea 0x80($out),$out # size optimization
  1603. vmovdqu $xa2,0x00($out)
  1604. vmovdqu $xb2,0x10($out)
  1605. vmovdqu $xc2,0x20($out)
  1606. vmovdqu $xd2,0x30($out)
  1607. je .Ldone4xop
  1608. lea 0x40($inp),$inp # inp+=64*3
  1609. vmovdqa $xa3,0x00(%rsp)
  1610. xor %r10,%r10
  1611. vmovdqa $xb3,0x10(%rsp)
  1612. lea 0x40($out),$out # out+=64*3
  1613. vmovdqa $xc3,0x20(%rsp)
  1614. sub \$192,$len # len-=64*3
  1615. vmovdqa $xd3,0x30(%rsp)
  1616. .Loop_tail4xop:
  1617. movzb ($inp,%r10),%eax
  1618. movzb (%rsp,%r10),%ecx
  1619. lea 1(%r10),%r10
  1620. xor %ecx,%eax
  1621. mov %al,-1($out,%r10)
  1622. dec $len
  1623. jnz .Loop_tail4xop
  1624. .Ldone4xop:
  1625. vzeroupper
  1626. ___
  1627. $code.=<<___ if ($win64);
  1628. movaps -0xa8(%r9),%xmm6
  1629. movaps -0x98(%r9),%xmm7
  1630. movaps -0x88(%r9),%xmm8
  1631. movaps -0x78(%r9),%xmm9
  1632. movaps -0x68(%r9),%xmm10
  1633. movaps -0x58(%r9),%xmm11
  1634. movaps -0x48(%r9),%xmm12
  1635. movaps -0x38(%r9),%xmm13
  1636. movaps -0x28(%r9),%xmm14
  1637. movaps -0x18(%r9),%xmm15
  1638. ___
  1639. $code.=<<___;
  1640. lea (%r9),%rsp
  1641. .cfi_def_cfa_register %rsp
  1642. .L4xop_epilogue:
  1643. ret
  1644. .cfi_endproc
  1645. .size ChaCha20_4xop,.-ChaCha20_4xop
  1646. ___
  1647. }
  1648. ########################################################################
  1649. # AVX2 code path
  1650. if ($avx>1) {
  1651. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1652. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
  1653. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1654. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  1655. sub AVX2_lane_ROUND {
  1656. my ($a0,$b0,$c0,$d0)=@_;
  1657. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1658. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1659. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1660. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  1661. my @x=map("\"$_\"",@xx);
  1662. # Consider the order in which the variables are addressed by their
  1663. # index:
  1664. #
  1665. # a b c d
  1666. #
  1667. # 0 4 8 12 < even round
  1668. # 1 5 9 13
  1669. # 2 6 10 14
  1670. # 3 7 11 15
  1671. # 0 5 10 15 < odd round
  1672. # 1 6 11 12
  1673. # 2 7 8 13
  1674. # 3 4 9 14
  1675. #
  1676. # 'a', 'b' and 'd's are permanently allocated in registers,
  1677. # @x[0..7,12..15], while the 'c's are maintained in memory. If
  1678. # you look at the 'c' column, you'll notice that one pair of 'c's
  1679. # is invariant between rounds. This means that we only have to
  1680. # reload them once per round, in the middle. This is why you'll
  1681. # see a bunch of 'c' stores and loads in the middle, but none at
  1682. # the beginning or end.
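# Concretely, the even-round call (0,4,8,12) derives the 'c' indices
# 8,9,10,11, while the odd-round call (0,5,10,15) derives 10,11,8,9; the
# pair left in registers at the end of one round (10,11 after the even
# round, 8,9 after the odd one) is exactly the pair the next round
# starts with, hence a single mid-round reload.  A one-line sketch of
# the derivation, using the same index map as above:
#
#   my @c = (8);  push @c, ($c[-1]&~3)+(($c[-1]+1)&3)  for 1..3;   # 8 9 10 11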
  1683. (
  1684. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1685. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1686. "&vpshufb (@x[$d0],@x[$d0],$t1)",
  1687. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1688. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1689. "&vpshufb (@x[$d1],@x[$d1],$t1)",
  1690. "&vpaddd ($xc,$xc,@x[$d0])",
  1691. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1692. "&vpslld ($t0,@x[$b0],12)",
  1693. "&vpsrld (@x[$b0],@x[$b0],20)",
  1694. "&vpor (@x[$b0],$t0,@x[$b0])",
  1695. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1696. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1697. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1698. "&vpslld ($t1,@x[$b1],12)",
  1699. "&vpsrld (@x[$b1],@x[$b1],20)",
  1700. "&vpor (@x[$b1],$t1,@x[$b1])",
  1701. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  1702. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1703. "&vpshufb (@x[$d0],@x[$d0],$t0)",
  1704. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  1705. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1706. "&vpshufb (@x[$d1],@x[$d1],$t0)",
  1707. "&vpaddd ($xc,$xc,@x[$d0])",
  1708. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1709. "&vpslld ($t1,@x[$b0],7)",
  1710. "&vpsrld (@x[$b0],@x[$b0],25)",
  1711. "&vpor (@x[$b0],$t1,@x[$b0])",
  1712. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1713. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1714. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1715. "&vpslld ($t0,@x[$b1],7)",
  1716. "&vpsrld (@x[$b1],@x[$b1],25)",
  1717. "&vpor (@x[$b1],$t0,@x[$b1])",
  1718. "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  1719. "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
  1720. "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
  1721. "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
  1722. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1723. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1724. "&vpshufb (@x[$d2],@x[$d2],$t1)",
  1725. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1726. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1727. "&vpshufb (@x[$d3],@x[$d3],$t1)",
  1728. "&vpaddd ($xc,$xc,@x[$d2])",
  1729. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1730. "&vpslld ($t0,@x[$b2],12)",
  1731. "&vpsrld (@x[$b2],@x[$b2],20)",
  1732. "&vpor (@x[$b2],$t0,@x[$b2])",
  1733. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1734. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1735. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1736. "&vpslld ($t1,@x[$b3],12)",
  1737. "&vpsrld (@x[$b3],@x[$b3],20)",
  1738. "&vpor (@x[$b3],$t1,@x[$b3])",
  1739. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1740. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1741. "&vpshufb (@x[$d2],@x[$d2],$t0)",
  1742. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1743. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1744. "&vpshufb (@x[$d3],@x[$d3],$t0)",
  1745. "&vpaddd ($xc,$xc,@x[$d2])",
  1746. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1747. "&vpslld ($t1,@x[$b2],7)",
  1748. "&vpsrld (@x[$b2],@x[$b2],25)",
  1749. "&vpor (@x[$b2],$t1,@x[$b2])",
  1750. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1751. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1752. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1753. "&vpslld ($t0,@x[$b3],7)",
  1754. "&vpsrld (@x[$b3],@x[$b3],25)",
  1755. "&vpor (@x[$b3],$t0,@x[$b3])"
  1756. );
  1757. }
  1758. my $xframe = $win64 ? 0xa8 : 8;
  1759. $code.=<<___;
  1760. .type ChaCha20_8x,\@function,5
  1761. .align 32
  1762. ChaCha20_8x:
  1763. .cfi_startproc
  1764. .LChaCha20_8x:
  1765. mov %rsp,%r9 # frame register
  1766. .cfi_def_cfa_register %r9
  1767. sub \$0x280+$xframe,%rsp
  1768. and \$-32,%rsp
  1769. ___
  1770. $code.=<<___ if ($win64);
  1771. movaps %xmm6,-0xa8(%r9)
  1772. movaps %xmm7,-0x98(%r9)
  1773. movaps %xmm8,-0x88(%r9)
  1774. movaps %xmm9,-0x78(%r9)
  1775. movaps %xmm10,-0x68(%r9)
  1776. movaps %xmm11,-0x58(%r9)
  1777. movaps %xmm12,-0x48(%r9)
  1778. movaps %xmm13,-0x38(%r9)
  1779. movaps %xmm14,-0x28(%r9)
  1780. movaps %xmm15,-0x18(%r9)
  1781. .L8x_body:
  1782. ___
  1783. $code.=<<___;
  1784. vzeroupper
  1785. ################ stack layout
  1786. # +0x00 SIMD equivalent of @x[8-12]
  1787. # ...
  1788. # +0x80 constant copy of key[0-2] smashed by lanes
  1789. # ...
  1790. # +0x200 SIMD counters (with nonce smashed by lanes)
  1791. # ...
  1792. # +0x280
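# As in the 4x/4xop paths, %rcx and %rax are pointed at %rsp+0x100 and
# %rsp+0x200 below purely so the key and counter areas can be addressed
# with one-byte displacements (e.g. 0x180-0x200(%rax) is 0x180(%rsp));
# this only shrinks the encoding, it does not change which bytes are
# touched.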
  1793. vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
  1794. vbroadcasti128 ($key),$xb3 # key[1]
  1795. vbroadcasti128 16($key),$xt3 # key[2]
  1796. vbroadcasti128 ($counter),$xd3 # key[3]
  1797. lea 0x100(%rsp),%rcx # size optimization
  1798. lea 0x200(%rsp),%rax # size optimization
  1799. lea .Lrot16(%rip),%r10
  1800. lea .Lrot24(%rip),%r11
  1801. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1802. vpshufd \$0x55,$xa3,$xa1
  1803. vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
  1804. vpshufd \$0xaa,$xa3,$xa2
  1805. vmovdqa $xa1,0xa0-0x100(%rcx)
  1806. vpshufd \$0xff,$xa3,$xa3
  1807. vmovdqa $xa2,0xc0-0x100(%rcx)
  1808. vmovdqa $xa3,0xe0-0x100(%rcx)
  1809. vpshufd \$0x00,$xb3,$xb0
  1810. vpshufd \$0x55,$xb3,$xb1
  1811. vmovdqa $xb0,0x100-0x100(%rcx)
  1812. vpshufd \$0xaa,$xb3,$xb2
  1813. vmovdqa $xb1,0x120-0x100(%rcx)
  1814. vpshufd \$0xff,$xb3,$xb3
  1815. vmovdqa $xb2,0x140-0x100(%rcx)
  1816. vmovdqa $xb3,0x160-0x100(%rcx)
  1817. vpshufd \$0x00,$xt3,$xt0 # "xc0"
  1818. vpshufd \$0x55,$xt3,$xt1 # "xc1"
  1819. vmovdqa $xt0,0x180-0x200(%rax)
  1820. vpshufd \$0xaa,$xt3,$xt2 # "xc2"
  1821. vmovdqa $xt1,0x1a0-0x200(%rax)
  1822. vpshufd \$0xff,$xt3,$xt3 # "xc3"
  1823. vmovdqa $xt2,0x1c0-0x200(%rax)
  1824. vmovdqa $xt3,0x1e0-0x200(%rax)
  1825. vpshufd \$0x00,$xd3,$xd0
  1826. vpshufd \$0x55,$xd3,$xd1
  1827. vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
  1828. vpshufd \$0xaa,$xd3,$xd2
  1829. vmovdqa $xd1,0x220-0x200(%rax)
  1830. vpshufd \$0xff,$xd3,$xd3
  1831. vmovdqa $xd2,0x240-0x200(%rax)
  1832. vmovdqa $xd3,0x260-0x200(%rax)
  1833. jmp .Loop_enter8x
  1834. .align 32
  1835. .Loop_outer8x:
  1836. vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
  1837. vmovdqa 0xa0-0x100(%rcx),$xa1
  1838. vmovdqa 0xc0-0x100(%rcx),$xa2
  1839. vmovdqa 0xe0-0x100(%rcx),$xa3
  1840. vmovdqa 0x100-0x100(%rcx),$xb0
  1841. vmovdqa 0x120-0x100(%rcx),$xb1
  1842. vmovdqa 0x140-0x100(%rcx),$xb2
  1843. vmovdqa 0x160-0x100(%rcx),$xb3
  1844. vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
  1845. vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
  1846. vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
  1847. vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
  1848. vmovdqa 0x200-0x200(%rax),$xd0
  1849. vmovdqa 0x220-0x200(%rax),$xd1
  1850. vmovdqa 0x240-0x200(%rax),$xd2
  1851. vmovdqa 0x260-0x200(%rax),$xd3
  1852. vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
  1853. .Loop_enter8x:
  1854. vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
  1855. vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
  1856. vbroadcasti128 (%r10),$xt3
  1857. vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
  1858. mov \$10,%eax
  1859. jmp .Loop8x
  1860. .align 32
  1861. .Loop8x:
  1862. ___
  1863. foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
  1864. foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
  1865. $code.=<<___;
  1866. dec %eax
  1867. jnz .Loop8x
  1868. lea 0x200(%rsp),%rax # size optimization
  1869. vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
  1870. vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
  1871. vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
  1872. vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
  1873. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1874. vpunpckldq $xa3,$xa2,$xt3
  1875. vpunpckhdq $xa1,$xa0,$xa0
  1876. vpunpckhdq $xa3,$xa2,$xa2
  1877. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1878. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1879. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1880. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1881. ___
  1882. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  1883. $code.=<<___;
  1884. vpaddd 0x100-0x100(%rcx),$xb0,$xb0
  1885. vpaddd 0x120-0x100(%rcx),$xb1,$xb1
  1886. vpaddd 0x140-0x100(%rcx),$xb2,$xb2
  1887. vpaddd 0x160-0x100(%rcx),$xb3,$xb3
  1888. vpunpckldq $xb1,$xb0,$xt2
  1889. vpunpckldq $xb3,$xb2,$xt3
  1890. vpunpckhdq $xb1,$xb0,$xb0
  1891. vpunpckhdq $xb3,$xb2,$xb2
  1892. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1893. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1894. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1895. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1896. ___
  1897. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1898. $code.=<<___;
  1899. vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
  1900. vperm2i128 \$0x31,$xb0,$xa0,$xb0
  1901. vperm2i128 \$0x20,$xb1,$xa1,$xa0
  1902. vperm2i128 \$0x31,$xb1,$xa1,$xb1
  1903. vperm2i128 \$0x20,$xb2,$xa2,$xa1
  1904. vperm2i128 \$0x31,$xb2,$xa2,$xb2
  1905. vperm2i128 \$0x20,$xb3,$xa3,$xa2
  1906. vperm2i128 \$0x31,$xb3,$xa3,$xb3
  1907. ___
  1908. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  1909. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  1910. $code.=<<___;
  1911. vmovdqa $xa0,0x00(%rsp) # offload $xaN
  1912. vmovdqa $xa1,0x20(%rsp)
  1913. vmovdqa 0x40(%rsp),$xc2 # $xa0
  1914. vmovdqa 0x60(%rsp),$xc3 # $xa1
  1915. vpaddd 0x180-0x200(%rax),$xc0,$xc0
  1916. vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
  1917. vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
  1918. vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
  1919. vpunpckldq $xc1,$xc0,$xt2
  1920. vpunpckldq $xc3,$xc2,$xt3
  1921. vpunpckhdq $xc1,$xc0,$xc0
  1922. vpunpckhdq $xc3,$xc2,$xc2
  1923. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1924. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1925. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1926. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1927. ___
  1928. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1929. $code.=<<___;
  1930. vpaddd 0x200-0x200(%rax),$xd0,$xd0
  1931. vpaddd 0x220-0x200(%rax),$xd1,$xd1
  1932. vpaddd 0x240-0x200(%rax),$xd2,$xd2
  1933. vpaddd 0x260-0x200(%rax),$xd3,$xd3
  1934. vpunpckldq $xd1,$xd0,$xt2
  1935. vpunpckldq $xd3,$xd2,$xt3
  1936. vpunpckhdq $xd1,$xd0,$xd0
  1937. vpunpckhdq $xd3,$xd2,$xd2
  1938. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1939. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1940. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1941. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1942. ___
  1943. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1944. $code.=<<___;
  1945. vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
  1946. vperm2i128 \$0x31,$xd0,$xc0,$xd0
  1947. vperm2i128 \$0x20,$xd1,$xc1,$xc0
  1948. vperm2i128 \$0x31,$xd1,$xc1,$xd1
  1949. vperm2i128 \$0x20,$xd2,$xc2,$xc1
  1950. vperm2i128 \$0x31,$xd2,$xc2,$xd2
  1951. vperm2i128 \$0x20,$xd3,$xc3,$xc2
  1952. vperm2i128 \$0x31,$xd3,$xc3,$xd3
  1953. ___
  1954. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  1955. ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
  1956. ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
  1957. ($xa0,$xa1)=($xt2,$xt3);
  1958. $code.=<<___;
  1959. vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
  1960. vmovdqa 0x20(%rsp),$xa1
  1961. cmp \$64*8,$len
  1962. jb .Ltail8x
  1963. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1964. vpxor 0x20($inp),$xb0,$xb0
  1965. vpxor 0x40($inp),$xc0,$xc0
  1966. vpxor 0x60($inp),$xd0,$xd0
  1967. lea 0x80($inp),$inp # size optimization
  1968. vmovdqu $xa0,0x00($out)
  1969. vmovdqu $xb0,0x20($out)
  1970. vmovdqu $xc0,0x40($out)
  1971. vmovdqu $xd0,0x60($out)
  1972. lea 0x80($out),$out # size optimization
  1973. vpxor 0x00($inp),$xa1,$xa1
  1974. vpxor 0x20($inp),$xb1,$xb1
  1975. vpxor 0x40($inp),$xc1,$xc1
  1976. vpxor 0x60($inp),$xd1,$xd1
  1977. lea 0x80($inp),$inp # size optimization
  1978. vmovdqu $xa1,0x00($out)
  1979. vmovdqu $xb1,0x20($out)
  1980. vmovdqu $xc1,0x40($out)
  1981. vmovdqu $xd1,0x60($out)
  1982. lea 0x80($out),$out # size optimization
  1983. vpxor 0x00($inp),$xa2,$xa2
  1984. vpxor 0x20($inp),$xb2,$xb2
  1985. vpxor 0x40($inp),$xc2,$xc2
  1986. vpxor 0x60($inp),$xd2,$xd2
  1987. lea 0x80($inp),$inp # size optimization
  1988. vmovdqu $xa2,0x00($out)
  1989. vmovdqu $xb2,0x20($out)
  1990. vmovdqu $xc2,0x40($out)
  1991. vmovdqu $xd2,0x60($out)
  1992. lea 0x80($out),$out # size optimization
  1993. vpxor 0x00($inp),$xa3,$xa3
  1994. vpxor 0x20($inp),$xb3,$xb3
  1995. vpxor 0x40($inp),$xc3,$xc3
  1996. vpxor 0x60($inp),$xd3,$xd3
  1997. lea 0x80($inp),$inp # size optimization
  1998. vmovdqu $xa3,0x00($out)
  1999. vmovdqu $xb3,0x20($out)
  2000. vmovdqu $xc3,0x40($out)
  2001. vmovdqu $xd3,0x60($out)
  2002. lea 0x80($out),$out # size optimization
  2003. sub \$64*8,$len
  2004. jnz .Loop_outer8x
  2005. jmp .Ldone8x
  2006. .Ltail8x:
  2007. cmp \$448,$len
  2008. jae .L448_or_more8x
  2009. cmp \$384,$len
  2010. jae .L384_or_more8x
  2011. cmp \$320,$len
  2012. jae .L320_or_more8x
  2013. cmp \$256,$len
  2014. jae .L256_or_more8x
  2015. cmp \$192,$len
  2016. jae .L192_or_more8x
  2017. cmp \$128,$len
  2018. jae .L128_or_more8x
  2019. cmp \$64,$len
  2020. jae .L64_or_more8x
  2021. xor %r10,%r10
  2022. vmovdqa $xa0,0x00(%rsp)
  2023. vmovdqa $xb0,0x20(%rsp)
  2024. jmp .Loop_tail8x
  2025. .align 32
  2026. .L64_or_more8x:
  2027. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2028. vpxor 0x20($inp),$xb0,$xb0
  2029. vmovdqu $xa0,0x00($out)
  2030. vmovdqu $xb0,0x20($out)
  2031. je .Ldone8x
  2032. lea 0x40($inp),$inp # inp+=64*1
  2033. xor %r10,%r10
  2034. vmovdqa $xc0,0x00(%rsp)
  2035. lea 0x40($out),$out # out+=64*1
  2036. sub \$64,$len # len-=64*1
  2037. vmovdqa $xd0,0x20(%rsp)
  2038. jmp .Loop_tail8x
  2039. .align 32
  2040. .L128_or_more8x:
  2041. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2042. vpxor 0x20($inp),$xb0,$xb0
  2043. vpxor 0x40($inp),$xc0,$xc0
  2044. vpxor 0x60($inp),$xd0,$xd0
  2045. vmovdqu $xa0,0x00($out)
  2046. vmovdqu $xb0,0x20($out)
  2047. vmovdqu $xc0,0x40($out)
  2048. vmovdqu $xd0,0x60($out)
  2049. je .Ldone8x
  2050. lea 0x80($inp),$inp # inp+=64*2
  2051. xor %r10,%r10
  2052. vmovdqa $xa1,0x00(%rsp)
  2053. lea 0x80($out),$out # out+=64*2
  2054. sub \$128,$len # len-=64*2
  2055. vmovdqa $xb1,0x20(%rsp)
  2056. jmp .Loop_tail8x
  2057. .align 32
  2058. .L192_or_more8x:
  2059. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2060. vpxor 0x20($inp),$xb0,$xb0
  2061. vpxor 0x40($inp),$xc0,$xc0
  2062. vpxor 0x60($inp),$xd0,$xd0
  2063. vpxor 0x80($inp),$xa1,$xa1
  2064. vpxor 0xa0($inp),$xb1,$xb1
  2065. vmovdqu $xa0,0x00($out)
  2066. vmovdqu $xb0,0x20($out)
  2067. vmovdqu $xc0,0x40($out)
  2068. vmovdqu $xd0,0x60($out)
  2069. vmovdqu $xa1,0x80($out)
  2070. vmovdqu $xb1,0xa0($out)
  2071. je .Ldone8x
  2072. lea 0xc0($inp),$inp # inp+=64*3
  2073. xor %r10,%r10
  2074. vmovdqa $xc1,0x00(%rsp)
  2075. lea 0xc0($out),$out # out+=64*3
  2076. sub \$192,$len # len-=64*3
  2077. vmovdqa $xd1,0x20(%rsp)
  2078. jmp .Loop_tail8x
  2079. .align 32
  2080. .L256_or_more8x:
  2081. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2082. vpxor 0x20($inp),$xb0,$xb0
  2083. vpxor 0x40($inp),$xc0,$xc0
  2084. vpxor 0x60($inp),$xd0,$xd0
  2085. vpxor 0x80($inp),$xa1,$xa1
  2086. vpxor 0xa0($inp),$xb1,$xb1
  2087. vpxor 0xc0($inp),$xc1,$xc1
  2088. vpxor 0xe0($inp),$xd1,$xd1
  2089. vmovdqu $xa0,0x00($out)
  2090. vmovdqu $xb0,0x20($out)
  2091. vmovdqu $xc0,0x40($out)
  2092. vmovdqu $xd0,0x60($out)
  2093. vmovdqu $xa1,0x80($out)
  2094. vmovdqu $xb1,0xa0($out)
  2095. vmovdqu $xc1,0xc0($out)
  2096. vmovdqu $xd1,0xe0($out)
  2097. je .Ldone8x
  2098. lea 0x100($inp),$inp # inp+=64*4
  2099. xor %r10,%r10
  2100. vmovdqa $xa2,0x00(%rsp)
  2101. lea 0x100($out),$out # out+=64*4
  2102. sub \$256,$len # len-=64*4
  2103. vmovdqa $xb2,0x20(%rsp)
  2104. jmp .Loop_tail8x
  2105. .align 32
  2106. .L320_or_more8x:
  2107. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2108. vpxor 0x20($inp),$xb0,$xb0
  2109. vpxor 0x40($inp),$xc0,$xc0
  2110. vpxor 0x60($inp),$xd0,$xd0
  2111. vpxor 0x80($inp),$xa1,$xa1
  2112. vpxor 0xa0($inp),$xb1,$xb1
  2113. vpxor 0xc0($inp),$xc1,$xc1
  2114. vpxor 0xe0($inp),$xd1,$xd1
  2115. vpxor 0x100($inp),$xa2,$xa2
  2116. vpxor 0x120($inp),$xb2,$xb2
  2117. vmovdqu $xa0,0x00($out)
  2118. vmovdqu $xb0,0x20($out)
  2119. vmovdqu $xc0,0x40($out)
  2120. vmovdqu $xd0,0x60($out)
  2121. vmovdqu $xa1,0x80($out)
  2122. vmovdqu $xb1,0xa0($out)
  2123. vmovdqu $xc1,0xc0($out)
  2124. vmovdqu $xd1,0xe0($out)
  2125. vmovdqu $xa2,0x100($out)
  2126. vmovdqu $xb2,0x120($out)
  2127. je .Ldone8x
  2128. lea 0x140($inp),$inp # inp+=64*5
  2129. xor %r10,%r10
  2130. vmovdqa $xc2,0x00(%rsp)
  2131. lea 0x140($out),$out # out+=64*5
  2132. sub \$320,$len # len-=64*5
  2133. vmovdqa $xd2,0x20(%rsp)
  2134. jmp .Loop_tail8x
  2135. .align 32
  2136. .L384_or_more8x:
  2137. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2138. vpxor 0x20($inp),$xb0,$xb0
  2139. vpxor 0x40($inp),$xc0,$xc0
  2140. vpxor 0x60($inp),$xd0,$xd0
  2141. vpxor 0x80($inp),$xa1,$xa1
  2142. vpxor 0xa0($inp),$xb1,$xb1
  2143. vpxor 0xc0($inp),$xc1,$xc1
  2144. vpxor 0xe0($inp),$xd1,$xd1
  2145. vpxor 0x100($inp),$xa2,$xa2
  2146. vpxor 0x120($inp),$xb2,$xb2
  2147. vpxor 0x140($inp),$xc2,$xc2
  2148. vpxor 0x160($inp),$xd2,$xd2
  2149. vmovdqu $xa0,0x00($out)
  2150. vmovdqu $xb0,0x20($out)
  2151. vmovdqu $xc0,0x40($out)
  2152. vmovdqu $xd0,0x60($out)
  2153. vmovdqu $xa1,0x80($out)
  2154. vmovdqu $xb1,0xa0($out)
  2155. vmovdqu $xc1,0xc0($out)
  2156. vmovdqu $xd1,0xe0($out)
  2157. vmovdqu $xa2,0x100($out)
  2158. vmovdqu $xb2,0x120($out)
  2159. vmovdqu $xc2,0x140($out)
  2160. vmovdqu $xd2,0x160($out)
  2161. je .Ldone8x
  2162. lea 0x180($inp),$inp # inp+=64*6
  2163. xor %r10,%r10
  2164. vmovdqa $xa3,0x00(%rsp)
  2165. lea 0x180($out),$out # out+=64*6
  2166. sub \$384,$len # len-=64*6
  2167. vmovdqa $xb3,0x20(%rsp)
  2168. jmp .Loop_tail8x
  2169. .align 32
  2170. .L448_or_more8x:
  2171. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2172. vpxor 0x20($inp),$xb0,$xb0
  2173. vpxor 0x40($inp),$xc0,$xc0
  2174. vpxor 0x60($inp),$xd0,$xd0
  2175. vpxor 0x80($inp),$xa1,$xa1
  2176. vpxor 0xa0($inp),$xb1,$xb1
  2177. vpxor 0xc0($inp),$xc1,$xc1
  2178. vpxor 0xe0($inp),$xd1,$xd1
  2179. vpxor 0x100($inp),$xa2,$xa2
  2180. vpxor 0x120($inp),$xb2,$xb2
  2181. vpxor 0x140($inp),$xc2,$xc2
  2182. vpxor 0x160($inp),$xd2,$xd2
  2183. vpxor 0x180($inp),$xa3,$xa3
  2184. vpxor 0x1a0($inp),$xb3,$xb3
  2185. vmovdqu $xa0,0x00($out)
  2186. vmovdqu $xb0,0x20($out)
  2187. vmovdqu $xc0,0x40($out)
  2188. vmovdqu $xd0,0x60($out)
  2189. vmovdqu $xa1,0x80($out)
  2190. vmovdqu $xb1,0xa0($out)
  2191. vmovdqu $xc1,0xc0($out)
  2192. vmovdqu $xd1,0xe0($out)
  2193. vmovdqu $xa2,0x100($out)
  2194. vmovdqu $xb2,0x120($out)
  2195. vmovdqu $xc2,0x140($out)
  2196. vmovdqu $xd2,0x160($out)
  2197. vmovdqu $xa3,0x180($out)
  2198. vmovdqu $xb3,0x1a0($out)
  2199. je .Ldone8x
  2200. lea 0x1c0($inp),$inp # inp+=64*7
  2201. xor %r10,%r10
  2202. vmovdqa $xc3,0x00(%rsp)
  2203. lea 0x1c0($out),$out # out+=64*7
  2204. sub \$448,$len # len-=64*7
  2205. vmovdqa $xd3,0x20(%rsp)
  2206. .Loop_tail8x:
  2207. movzb ($inp,%r10),%eax
  2208. movzb (%rsp,%r10),%ecx
  2209. lea 1(%r10),%r10
  2210. xor %ecx,%eax
  2211. mov %al,-1($out,%r10)
  2212. dec $len
  2213. jnz .Loop_tail8x
  2214. .Ldone8x:
  2215. vzeroall
  2216. ___
  2217. $code.=<<___ if ($win64);
  2218. movaps -0xa8(%r9),%xmm6
  2219. movaps -0x98(%r9),%xmm7
  2220. movaps -0x88(%r9),%xmm8
  2221. movaps -0x78(%r9),%xmm9
  2222. movaps -0x68(%r9),%xmm10
  2223. movaps -0x58(%r9),%xmm11
  2224. movaps -0x48(%r9),%xmm12
  2225. movaps -0x38(%r9),%xmm13
  2226. movaps -0x28(%r9),%xmm14
  2227. movaps -0x18(%r9),%xmm15
  2228. ___
  2229. $code.=<<___;
  2230. lea (%r9),%rsp
  2231. .cfi_def_cfa_register %rsp
  2232. .L8x_epilogue:
  2233. ret
  2234. .cfi_endproc
  2235. .size ChaCha20_8x,.-ChaCha20_8x
  2236. ___
  2237. }
  2238. ########################################################################
  2239. # AVX512 code paths
  2240. if ($avx>2) {
  2241. # This one handles shorter inputs...
  2242. my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
  2243. my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
  2244. sub vpxord() # size optimization
  2245. { my $opcode = "vpxor"; # adhere to vpxor when possible
  2246. foreach (@_) {
  2247. if (/%([zy])mm([0-9]+)/ && ($1 eq "z" || $2>=16)) {
  2248. $opcode = "vpxord";
  2249. last;
  2250. }
  2251. }
  2252. $code .= "\t$opcode\t".join(',',reverse @_)."\n";
  2253. }
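# A quick illustration of the helper above: only %zmm operands, or
# %ymm/%xmm registers 16..31, require the EVEX-encoded vpxord; anything
# else keeps the shorter VEX-encoded vpxor.  For example:
#
#   &vpxord("%ymm1","%ymm2","%ymm3");     # emits  vpxor   %ymm3,%ymm2,%ymm1
#   &vpxord("%zmm1","%zmm2","%zmm3");     # emits  vpxord  %zmm3,%zmm2,%zmm1
#   &vpxord("%ymm17","%ymm2","%ymm3");    # emits  vpxord  %ymm3,%ymm2,%ymm17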
  2254. sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
  2255. &vpaddd ($a,$a,$b);
  2256. &vpxord ($d,$d,$a);
  2257. &vprold ($d,$d,16);
  2258. &vpaddd ($c,$c,$d);
  2259. &vpxord ($b,$b,$c);
  2260. &vprold ($b,$b,12);
  2261. &vpaddd ($a,$a,$b);
  2262. &vpxord ($d,$d,$a);
  2263. &vprold ($d,$d,8);
  2264. &vpaddd ($c,$c,$d);
  2265. &vpxord ($b,$b,$c);
  2266. &vprold ($b,$b,7);
  2267. }
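# vprold rotates each dword in one instruction, roughly
# dst = (src << n) | (src >> (32-n)) per 32-bit lane, which is why the
# quarter-round above needs no vpslld/vpsrld/vpor triples or vpshufb
# byte-rotate masks like the AVX2 path.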
  2268. my $xframe = $win64 ? 160+8 : 8;
  2269. $code.=<<___;
  2270. .type ChaCha20_avx512,\@function,5
  2271. .align 32
  2272. ChaCha20_avx512:
  2273. .cfi_startproc
  2274. .LChaCha20_avx512:
  2275. mov %rsp,%r9 # frame pointer
  2276. .cfi_def_cfa_register %r9
  2277. cmp \$512,$len
  2278. ja .LChaCha20_16x
  2279. sub \$64+$xframe,%rsp
  2280. ___
  2281. $code.=<<___ if ($win64);
  2282. movaps %xmm6,-0xa8(%r9)
  2283. movaps %xmm7,-0x98(%r9)
  2284. movaps %xmm8,-0x88(%r9)
  2285. movaps %xmm9,-0x78(%r9)
  2286. movaps %xmm10,-0x68(%r9)
  2287. movaps %xmm11,-0x58(%r9)
  2288. movaps %xmm12,-0x48(%r9)
  2289. movaps %xmm13,-0x38(%r9)
  2290. movaps %xmm14,-0x28(%r9)
  2291. movaps %xmm15,-0x18(%r9)
  2292. .Lavx512_body:
  2293. ___
  2294. $code.=<<___;
  2295. vbroadcasti32x4 .Lsigma(%rip),$a
  2296. vbroadcasti32x4 ($key),$b
  2297. vbroadcasti32x4 16($key),$c
  2298. vbroadcasti32x4 ($counter),$d
  2299. vmovdqa32 $a,$a_
  2300. vmovdqa32 $b,$b_
  2301. vmovdqa32 $c,$c_
  2302. vpaddd .Lzeroz(%rip),$d,$d
  2303. vmovdqa32 .Lfourz(%rip),$fourz
  2304. mov \$10,$counter # reuse $counter
  2305. vmovdqa32 $d,$d_
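# In this path a single %zmm register carries four copies of each 128-bit
# state row, one per 128-bit lane.  .Lzeroz (defined with the other
# constants earlier in the file) offsets the counter word so the four
# lanes produce keystream for blocks n, n+1, n+2 and n+3, and the outer
# loop adds .Lfourz to advance all four counters by 4 at once.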
  2306. jmp .Loop_avx512
  2307. .align 16
  2308. .Loop_outer_avx512:
  2309. vmovdqa32 $a_,$a
  2310. vmovdqa32 $b_,$b
  2311. vmovdqa32 $c_,$c
  2312. vpaddd $fourz,$d_,$d
  2313. mov \$10,$counter
  2314. vmovdqa32 $d,$d_
  2315. jmp .Loop_avx512
  2316. .align 32
  2317. .Loop_avx512:
  2318. ___
  2319. &AVX512ROUND();
  2320. &vpshufd ($c,$c,0b01001110);
  2321. &vpshufd ($b,$b,0b00111001);
  2322. &vpshufd ($d,$d,0b10010011);
  2323. &AVX512ROUND();
  2324. &vpshufd ($c,$c,0b01001110);
  2325. &vpshufd ($b,$b,0b10010011);
  2326. &vpshufd ($d,$d,0b00111001);
  2327. &dec ($counter);
  2328. &jnz (".Loop_avx512");
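# The vpshufd immediates above rotate the b/c/d rows within each 128-bit
# lane between the two AVX512ROUND calls, turning the column round into a
# diagonal round; the second trio of shuffles undoes the rotation.  For a
# row holding dwords (w0,w1,w2,w3):
#
#   vpshufd 0b00111001  ->  (w1,w2,w3,w0)
#   vpshufd 0b01001110  ->  (w2,w3,w0,w1)
#   vpshufd 0b10010011  ->  (w3,w0,w1,w2)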
  2329. $code.=<<___;
  2330. vpaddd $a_,$a,$a
  2331. vpaddd $b_,$b,$b
  2332. vpaddd $c_,$c,$c
  2333. vpaddd $d_,$d,$d
  2334. sub \$64,$len
  2335. jb .Ltail64_avx512
  2336. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2337. vpxor 0x10($inp),%x#$b,$t1
  2338. vpxor 0x20($inp),%x#$c,$t2
  2339. vpxor 0x30($inp),%x#$d,$t3
  2340. lea 0x40($inp),$inp # inp+=64
  2341. vmovdqu $t0,0x00($out) # write output
  2342. vmovdqu $t1,0x10($out)
  2343. vmovdqu $t2,0x20($out)
  2344. vmovdqu $t3,0x30($out)
  2345. lea 0x40($out),$out # out+=64
  2346. jz .Ldone_avx512
  2347. vextracti32x4 \$1,$a,$t0
  2348. vextracti32x4 \$1,$b,$t1
  2349. vextracti32x4 \$1,$c,$t2
  2350. vextracti32x4 \$1,$d,$t3
  2351. sub \$64,$len
  2352. jb .Ltail_avx512
  2353. vpxor 0x00($inp),$t0,$t0 # xor with input
  2354. vpxor 0x10($inp),$t1,$t1
  2355. vpxor 0x20($inp),$t2,$t2
  2356. vpxor 0x30($inp),$t3,$t3
  2357. lea 0x40($inp),$inp # inp+=64
  2358. vmovdqu $t0,0x00($out) # write output
  2359. vmovdqu $t1,0x10($out)
  2360. vmovdqu $t2,0x20($out)
  2361. vmovdqu $t3,0x30($out)
  2362. lea 0x40($out),$out # out+=64
  2363. jz .Ldone_avx512
  2364. vextracti32x4 \$2,$a,$t0
  2365. vextracti32x4 \$2,$b,$t1
  2366. vextracti32x4 \$2,$c,$t2
  2367. vextracti32x4 \$2,$d,$t3
  2368. sub \$64,$len
  2369. jb .Ltail_avx512
  2370. vpxor 0x00($inp),$t0,$t0 # xor with input
  2371. vpxor 0x10($inp),$t1,$t1
  2372. vpxor 0x20($inp),$t2,$t2
  2373. vpxor 0x30($inp),$t3,$t3
  2374. lea 0x40($inp),$inp # inp+=64
  2375. vmovdqu $t0,0x00($out) # write output
  2376. vmovdqu $t1,0x10($out)
  2377. vmovdqu $t2,0x20($out)
  2378. vmovdqu $t3,0x30($out)
  2379. lea 0x40($out),$out # out+=64
  2380. jz .Ldone_avx512
  2381. vextracti32x4 \$3,$a,$t0
  2382. vextracti32x4 \$3,$b,$t1
  2383. vextracti32x4 \$3,$c,$t2
  2384. vextracti32x4 \$3,$d,$t3
  2385. sub \$64,$len
  2386. jb .Ltail_avx512
  2387. vpxor 0x00($inp),$t0,$t0 # xor with input
  2388. vpxor 0x10($inp),$t1,$t1
  2389. vpxor 0x20($inp),$t2,$t2
  2390. vpxor 0x30($inp),$t3,$t3
  2391. lea 0x40($inp),$inp # inp+=64
  2392. vmovdqu $t0,0x00($out) # write output
  2393. vmovdqu $t1,0x10($out)
  2394. vmovdqu $t2,0x20($out)
  2395. vmovdqu $t3,0x30($out)
  2396. lea 0x40($out),$out # out+=64
  2397. jnz .Loop_outer_avx512
  2398. jmp .Ldone_avx512
  2399. .align 16
  2400. .Ltail64_avx512:
  2401. vmovdqa %x#$a,0x00(%rsp)
  2402. vmovdqa %x#$b,0x10(%rsp)
  2403. vmovdqa %x#$c,0x20(%rsp)
  2404. vmovdqa %x#$d,0x30(%rsp)
  2405. add \$64,$len
  2406. jmp .Loop_tail_avx512
  2407. .align 16
  2408. .Ltail_avx512:
  2409. vmovdqa $t0,0x00(%rsp)
  2410. vmovdqa $t1,0x10(%rsp)
  2411. vmovdqa $t2,0x20(%rsp)
  2412. vmovdqa $t3,0x30(%rsp)
  2413. add \$64,$len
  2414. .Loop_tail_avx512:
  2415. movzb ($inp,$counter),%eax
  2416. movzb (%rsp,$counter),%ecx
  2417. lea 1($counter),$counter
  2418. xor %ecx,%eax
  2419. mov %al,-1($out,$counter)
  2420. dec $len
  2421. jnz .Loop_tail_avx512
  2422. vmovdqu32 $a_,0x00(%rsp)
  2423. .Ldone_avx512:
  2424. vzeroall
  2425. ___
  2426. $code.=<<___ if ($win64);
  2427. movaps -0xa8(%r9),%xmm6
  2428. movaps -0x98(%r9),%xmm7
  2429. movaps -0x88(%r9),%xmm8
  2430. movaps -0x78(%r9),%xmm9
  2431. movaps -0x68(%r9),%xmm10
  2432. movaps -0x58(%r9),%xmm11
  2433. movaps -0x48(%r9),%xmm12
  2434. movaps -0x38(%r9),%xmm13
  2435. movaps -0x28(%r9),%xmm14
  2436. movaps -0x18(%r9),%xmm15
  2437. ___
  2438. $code.=<<___;
  2439. lea (%r9),%rsp
  2440. .cfi_def_cfa_register %rsp
  2441. .Lavx512_epilogue:
  2442. ret
  2443. .cfi_endproc
  2444. .size ChaCha20_avx512,.-ChaCha20_avx512
  2445. ___
  2446. map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz);
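# The substitution above only renames the register set from %zmm to %ymm
# so that AVX512ROUND and the surrounding template can be reused verbatim
# for the 256-bit AVX512VL variant below, e.g.:
#
#   my $r = "%zmm17";  $r =~ s/%z/%y/;    # $r is now "%ymm17"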
  2447. $code.=<<___;
  2448. .type ChaCha20_avx512vl,\@function,5
  2449. .align 32
  2450. ChaCha20_avx512vl:
  2451. .cfi_startproc
  2452. .LChaCha20_avx512vl:
  2453. mov %rsp,%r9 # frame pointer
  2454. .cfi_def_cfa_register %r9
  2455. cmp \$128,$len
  2456. ja .LChaCha20_8xvl
  2457. sub \$64+$xframe,%rsp
  2458. ___
  2459. $code.=<<___ if ($win64);
  2460. movaps %xmm6,-0xa8(%r9)
  2461. movaps %xmm7,-0x98(%r9)
  2462. movaps %xmm8,-0x88(%r9)
  2463. movaps %xmm9,-0x78(%r9)
  2464. movaps %xmm10,-0x68(%r9)
  2465. movaps %xmm11,-0x58(%r9)
  2466. movaps %xmm12,-0x48(%r9)
  2467. movaps %xmm13,-0x38(%r9)
  2468. movaps %xmm14,-0x28(%r9)
  2469. movaps %xmm15,-0x18(%r9)
  2470. .Lavx512vl_body:
  2471. ___
  2472. $code.=<<___;
  2473. vbroadcasti128 .Lsigma(%rip),$a
  2474. vbroadcasti128 ($key),$b
  2475. vbroadcasti128 16($key),$c
  2476. vbroadcasti128 ($counter),$d
  2477. vmovdqa32 $a,$a_
  2478. vmovdqa32 $b,$b_
  2479. vmovdqa32 $c,$c_
  2480. vpaddd .Lzeroz(%rip),$d,$d
  2481. vmovdqa32 .Ltwoy(%rip),$fourz
  2482. mov \$10,$counter # reuse $counter
  2483. vmovdqa32 $d,$d_
  2484. jmp .Loop_avx512vl
  2485. .align 16
  2486. .Loop_outer_avx512vl:
  2487. vmovdqa32 $c_,$c
  2488. vpaddd $fourz,$d_,$d
  2489. mov \$10,$counter
  2490. vmovdqa32 $d,$d_
  2491. jmp .Loop_avx512vl
  2492. .align 32
  2493. .Loop_avx512vl:
  2494. ___
  2495. &AVX512ROUND();
  2496. &vpshufd ($c,$c,0b01001110);
  2497. &vpshufd ($b,$b,0b00111001);
  2498. &vpshufd ($d,$d,0b10010011);
  2499. &AVX512ROUND();
  2500. &vpshufd ($c,$c,0b01001110);
  2501. &vpshufd ($b,$b,0b10010011);
  2502. &vpshufd ($d,$d,0b00111001);
  2503. &dec ($counter);
  2504. &jnz (".Loop_avx512vl");
  2505. $code.=<<___;
  2506. vpaddd $a_,$a,$a
  2507. vpaddd $b_,$b,$b
  2508. vpaddd $c_,$c,$c
  2509. vpaddd $d_,$d,$d
  2510. sub \$64,$len
  2511. jb .Ltail64_avx512vl
  2512. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2513. vpxor 0x10($inp),%x#$b,$t1
  2514. vpxor 0x20($inp),%x#$c,$t2
  2515. vpxor 0x30($inp),%x#$d,$t3
  2516. lea 0x40($inp),$inp # inp+=64
  2517. vmovdqu $t0,0x00($out) # write output
  2518. vmovdqu $t1,0x10($out)
  2519. vmovdqu $t2,0x20($out)
  2520. vmovdqu $t3,0x30($out)
  2521. lea 0x40($out),$out # out+=64
  2522. jz .Ldone_avx512vl
  2523. vextracti128 \$1,$a,$t0
  2524. vextracti128 \$1,$b,$t1
  2525. vextracti128 \$1,$c,$t2
  2526. vextracti128 \$1,$d,$t3
  2527. sub \$64,$len
  2528. jb .Ltail_avx512vl
  2529. vpxor 0x00($inp),$t0,$t0 # xor with input
  2530. vpxor 0x10($inp),$t1,$t1
  2531. vpxor 0x20($inp),$t2,$t2
  2532. vpxor 0x30($inp),$t3,$t3
  2533. lea 0x40($inp),$inp # inp+=64
  2534. vmovdqu $t0,0x00($out) # write output
  2535. vmovdqu $t1,0x10($out)
  2536. vmovdqu $t2,0x20($out)
  2537. vmovdqu $t3,0x30($out)
  2538. lea 0x40($out),$out # out+=64
  2539. vmovdqa32 $a_,$a
  2540. vmovdqa32 $b_,$b
  2541. jnz .Loop_outer_avx512vl
  2542. jmp .Ldone_avx512vl
  2543. .align 16
  2544. .Ltail64_avx512vl:
  2545. vmovdqa %x#$a,0x00(%rsp)
  2546. vmovdqa %x#$b,0x10(%rsp)
  2547. vmovdqa %x#$c,0x20(%rsp)
  2548. vmovdqa %x#$d,0x30(%rsp)
  2549. add \$64,$len
  2550. jmp .Loop_tail_avx512vl
  2551. .align 16
  2552. .Ltail_avx512vl:
  2553. vmovdqa $t0,0x00(%rsp)
  2554. vmovdqa $t1,0x10(%rsp)
  2555. vmovdqa $t2,0x20(%rsp)
  2556. vmovdqa $t3,0x30(%rsp)
  2557. add \$64,$len
  2558. .Loop_tail_avx512vl:
  2559. movzb ($inp,$counter),%eax
  2560. movzb (%rsp,$counter),%ecx
  2561. lea 1($counter),$counter
  2562. xor %ecx,%eax
  2563. mov %al,-1($out,$counter)
  2564. dec $len
  2565. jnz .Loop_tail_avx512vl
  2566. vmovdqu32 $a_,0x00(%rsp)
  2567. vmovdqu32 $a_,0x20(%rsp)
  2568. .Ldone_avx512vl:
  2569. vzeroall
  2570. ___
  2571. $code.=<<___ if ($win64);
  2572. movaps -0xa8(%r9),%xmm6
  2573. movaps -0x98(%r9),%xmm7
  2574. movaps -0x88(%r9),%xmm8
  2575. movaps -0x78(%r9),%xmm9
  2576. movaps -0x68(%r9),%xmm10
  2577. movaps -0x58(%r9),%xmm11
  2578. movaps -0x48(%r9),%xmm12
  2579. movaps -0x38(%r9),%xmm13
  2580. movaps -0x28(%r9),%xmm14
  2581. movaps -0x18(%r9),%xmm15
  2582. ___
  2583. $code.=<<___;
  2584. lea (%r9),%rsp
  2585. .cfi_def_cfa_register %rsp
  2586. .Lavx512vl_epilogue:
  2587. ret
  2588. .cfi_endproc
  2589. .size ChaCha20_avx512vl,.-ChaCha20_avx512vl
  2590. ___
  2591. }
  2592. if ($avx>2) {
  2593. # This one handles longer inputs...
  2594. my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2595. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
  2596. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2597. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
  2598. my @key=map("%zmm$_",(16..31));
  2599. my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
  2600. sub AVX512_lane_ROUND {
  2601. my ($a0,$b0,$c0,$d0)=@_;
  2602. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  2603. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  2604. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  2605. my @x=map("\"$_\"",@xx);
  2606. (
  2607. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  2608. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  2609. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  2610. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  2611. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2612. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2613. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2614. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2615. "&vprold (@x[$d0],@x[$d0],16)",
  2616. "&vprold (@x[$d1],@x[$d1],16)",
  2617. "&vprold (@x[$d2],@x[$d2],16)",
  2618. "&vprold (@x[$d3],@x[$d3],16)",
  2619. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2620. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2621. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2622. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2623. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2624. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2625. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2626. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2627. "&vprold (@x[$b0],@x[$b0],12)",
  2628. "&vprold (@x[$b1],@x[$b1],12)",
  2629. "&vprold (@x[$b2],@x[$b2],12)",
  2630. "&vprold (@x[$b3],@x[$b3],12)",
  2631. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  2632. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  2633. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  2634. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  2635. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2636. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2637. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2638. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2639. "&vprold (@x[$d0],@x[$d0],8)",
  2640. "&vprold (@x[$d1],@x[$d1],8)",
  2641. "&vprold (@x[$d2],@x[$d2],8)",
  2642. "&vprold (@x[$d3],@x[$d3],8)",
  2643. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2644. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2645. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2646. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2647. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2648. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2649. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2650. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2651. "&vprold (@x[$b0],@x[$b0],7)",
  2652. "&vprold (@x[$b1],@x[$b1],7)",
  2653. "&vprold (@x[$b2],@x[$b2],7)",
  2654. "&vprold (@x[$b3],@x[$b3],7)"
  2655. );
  2656. }
  2657. my $xframe = $win64 ? 0xa8 : 8;
  2658. $code.=<<___;
  2659. .type ChaCha20_16x,\@function,5
  2660. .align 32
  2661. ChaCha20_16x:
  2662. .cfi_startproc
  2663. .LChaCha20_16x:
  2664. mov %rsp,%r9 # frame register
  2665. .cfi_def_cfa_register %r9
  2666. sub \$64+$xframe,%rsp
  2667. and \$-64,%rsp
  2668. ___
  2669. $code.=<<___ if ($win64);
  2670. movaps %xmm6,-0xa8(%r9)
  2671. movaps %xmm7,-0x98(%r9)
  2672. movaps %xmm8,-0x88(%r9)
  2673. movaps %xmm9,-0x78(%r9)
  2674. movaps %xmm10,-0x68(%r9)
  2675. movaps %xmm11,-0x58(%r9)
  2676. movaps %xmm12,-0x48(%r9)
  2677. movaps %xmm13,-0x38(%r9)
  2678. movaps %xmm14,-0x28(%r9)
  2679. movaps %xmm15,-0x18(%r9)
  2680. .L16x_body:
  2681. ___
  2682. $code.=<<___;
  2683. vzeroupper
  2684. lea .Lsigma(%rip),%r10
  2685. vbroadcasti32x4 (%r10),$xa3 # key[0]
  2686. vbroadcasti32x4 ($key),$xb3 # key[1]
  2687. vbroadcasti32x4 16($key),$xc3 # key[2]
  2688. vbroadcasti32x4 ($counter),$xd3 # key[3]
  2689. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  2690. vpshufd \$0x55,$xa3,$xa1
  2691. vpshufd \$0xaa,$xa3,$xa2
  2692. vpshufd \$0xff,$xa3,$xa3
  2693. vmovdqa64 $xa0,@key[0]
  2694. vmovdqa64 $xa1,@key[1]
  2695. vmovdqa64 $xa2,@key[2]
  2696. vmovdqa64 $xa3,@key[3]
  2697. vpshufd \$0x00,$xb3,$xb0
  2698. vpshufd \$0x55,$xb3,$xb1
  2699. vpshufd \$0xaa,$xb3,$xb2
  2700. vpshufd \$0xff,$xb3,$xb3
  2701. vmovdqa64 $xb0,@key[4]
  2702. vmovdqa64 $xb1,@key[5]
  2703. vmovdqa64 $xb2,@key[6]
  2704. vmovdqa64 $xb3,@key[7]
  2705. vpshufd \$0x00,$xc3,$xc0
  2706. vpshufd \$0x55,$xc3,$xc1
  2707. vpshufd \$0xaa,$xc3,$xc2
  2708. vpshufd \$0xff,$xc3,$xc3
  2709. vmovdqa64 $xc0,@key[8]
  2710. vmovdqa64 $xc1,@key[9]
  2711. vmovdqa64 $xc2,@key[10]
  2712. vmovdqa64 $xc3,@key[11]
  2713. vpshufd \$0x00,$xd3,$xd0
  2714. vpshufd \$0x55,$xd3,$xd1
  2715. vpshufd \$0xaa,$xd3,$xd2
  2716. vpshufd \$0xff,$xd3,$xd3
  2717. vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
  2718. vmovdqa64 $xd0,@key[12]
  2719. vmovdqa64 $xd1,@key[13]
  2720. vmovdqa64 $xd2,@key[14]
  2721. vmovdqa64 $xd3,@key[15]
  2722. mov \$10,%eax
  2723. jmp .Loop16x
  2724. .align 32
  2725. .Loop_outer16x:
  2726. vpbroadcastd 0(%r10),$xa0 # reload key
  2727. vpbroadcastd 4(%r10),$xa1
  2728. vpbroadcastd 8(%r10),$xa2
  2729. vpbroadcastd 12(%r10),$xa3
  2730. vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters
  2731. vmovdqa64 @key[4],$xb0
  2732. vmovdqa64 @key[5],$xb1
  2733. vmovdqa64 @key[6],$xb2
  2734. vmovdqa64 @key[7],$xb3
  2735. vmovdqa64 @key[8],$xc0
  2736. vmovdqa64 @key[9],$xc1
  2737. vmovdqa64 @key[10],$xc2
  2738. vmovdqa64 @key[11],$xc3
  2739. vmovdqa64 @key[12],$xd0
  2740. vmovdqa64 @key[13],$xd1
  2741. vmovdqa64 @key[14],$xd2
  2742. vmovdqa64 @key[15],$xd3
  2743. vmovdqa64 $xa0,@key[0]
  2744. vmovdqa64 $xa1,@key[1]
  2745. vmovdqa64 $xa2,@key[2]
  2746. vmovdqa64 $xa3,@key[3]
  2747. mov \$10,%eax
  2748. jmp .Loop16x
  2749. .align 32
  2750. .Loop16x:
  2751. ___
  2752. foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
  2753. foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
  2754. $code.=<<___;
  2755. dec %eax
  2756. jnz .Loop16x
  2757. vpaddd @key[0],$xa0,$xa0 # accumulate key
  2758. vpaddd @key[1],$xa1,$xa1
  2759. vpaddd @key[2],$xa2,$xa2
  2760. vpaddd @key[3],$xa3,$xa3
  2761. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  2762. vpunpckldq $xa3,$xa2,$xt3
  2763. vpunpckhdq $xa1,$xa0,$xa0
  2764. vpunpckhdq $xa3,$xa2,$xa2
  2765. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  2766. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  2767. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  2768. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  2769. ___
  2770. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  2771. $code.=<<___;
  2772. vpaddd @key[4],$xb0,$xb0
  2773. vpaddd @key[5],$xb1,$xb1
  2774. vpaddd @key[6],$xb2,$xb2
  2775. vpaddd @key[7],$xb3,$xb3
  2776. vpunpckldq $xb1,$xb0,$xt2
  2777. vpunpckldq $xb3,$xb2,$xt3
  2778. vpunpckhdq $xb1,$xb0,$xb0
  2779. vpunpckhdq $xb3,$xb2,$xb2
  2780. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  2781. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  2782. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  2783. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  2784. ___
  2785. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  2786. $code.=<<___;
  2787. vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further
  2788. vshufi32x4 \$0xee,$xb0,$xa0,$xb0
  2789. vshufi32x4 \$0x44,$xb1,$xa1,$xa0
  2790. vshufi32x4 \$0xee,$xb1,$xa1,$xb1
  2791. vshufi32x4 \$0x44,$xb2,$xa2,$xa1
  2792. vshufi32x4 \$0xee,$xb2,$xa2,$xb2
  2793. vshufi32x4 \$0x44,$xb3,$xa3,$xa2
  2794. vshufi32x4 \$0xee,$xb3,$xa3,$xb3
  2795. ___
  2796. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  2797. $code.=<<___;
  2798. vpaddd @key[8],$xc0,$xc0
  2799. vpaddd @key[9],$xc1,$xc1
  2800. vpaddd @key[10],$xc2,$xc2
  2801. vpaddd @key[11],$xc3,$xc3
  2802. vpunpckldq $xc1,$xc0,$xt2
  2803. vpunpckldq $xc3,$xc2,$xt3
  2804. vpunpckhdq $xc1,$xc0,$xc0
  2805. vpunpckhdq $xc3,$xc2,$xc2
  2806. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  2807. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  2808. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  2809. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  2810. ___
  2811. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  2812. $code.=<<___;
  2813. vpaddd @key[12],$xd0,$xd0
  2814. vpaddd @key[13],$xd1,$xd1
  2815. vpaddd @key[14],$xd2,$xd2
  2816. vpaddd @key[15],$xd3,$xd3
  2817. vpunpckldq $xd1,$xd0,$xt2
  2818. vpunpckldq $xd3,$xd2,$xt3
  2819. vpunpckhdq $xd1,$xd0,$xd0
  2820. vpunpckhdq $xd3,$xd2,$xd2
  2821. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  2822. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  2823. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  2824. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  2825. ___
  2826. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  2827. $code.=<<___;
  2828. vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further
  2829. vshufi32x4 \$0xee,$xd0,$xc0,$xd0
  2830. vshufi32x4 \$0x44,$xd1,$xc1,$xc0
  2831. vshufi32x4 \$0xee,$xd1,$xc1,$xd1
  2832. vshufi32x4 \$0x44,$xd2,$xc2,$xc1
  2833. vshufi32x4 \$0xee,$xd2,$xc2,$xd2
  2834. vshufi32x4 \$0x44,$xd3,$xc3,$xc2
  2835. vshufi32x4 \$0xee,$xd3,$xc3,$xd3
  2836. ___
  2837. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  2838. $code.=<<___;
  2839. vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further
  2840. vshufi32x4 \$0xdd,$xc0,$xa0,$xa0
  2841. vshufi32x4 \$0x88,$xd0,$xb0,$xc0
  2842. vshufi32x4 \$0xdd,$xd0,$xb0,$xd0
  2843. vshufi32x4 \$0x88,$xc1,$xa1,$xt1
  2844. vshufi32x4 \$0xdd,$xc1,$xa1,$xa1
  2845. vshufi32x4 \$0x88,$xd1,$xb1,$xc1
  2846. vshufi32x4 \$0xdd,$xd1,$xb1,$xd1
  2847. vshufi32x4 \$0x88,$xc2,$xa2,$xt2
  2848. vshufi32x4 \$0xdd,$xc2,$xa2,$xa2
  2849. vshufi32x4 \$0x88,$xd2,$xb2,$xc2
  2850. vshufi32x4 \$0xdd,$xd2,$xb2,$xd2
  2851. vshufi32x4 \$0x88,$xc3,$xa3,$xt3
  2852. vshufi32x4 \$0xdd,$xc3,$xa3,$xa3
  2853. vshufi32x4 \$0x88,$xd3,$xb3,$xc3
  2854. vshufi32x4 \$0xdd,$xd3,$xb3,$xd3
  2855. ___
  2856. ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
  2857. ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
  2858. ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
  2859. $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
  2860. ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2861. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
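# After the three shuffle stages above (dword unpacks, qword unpacks,
# then 128-bit lane shuffles via vshufi32x4) every %zmm register holds
# one complete 64-byte block of keystream; the renames only re-label the
# registers so that $xa0,$xb0,$xc0,$xd0,$xa1,... come out in sequential
# block order for the stores below.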
  2862. $code.=<<___;
  2863. cmp \$64*16,$len
  2864. jb .Ltail16x
  2865. vpxord 0x00($inp),$xa0,$xa0 # xor with input
  2866. vpxord 0x40($inp),$xb0,$xb0
  2867. vpxord 0x80($inp),$xc0,$xc0
  2868. vpxord 0xc0($inp),$xd0,$xd0
  2869. vmovdqu32 $xa0,0x00($out)
  2870. vmovdqu32 $xb0,0x40($out)
  2871. vmovdqu32 $xc0,0x80($out)
  2872. vmovdqu32 $xd0,0xc0($out)
  2873. vpxord 0x100($inp),$xa1,$xa1
  2874. vpxord 0x140($inp),$xb1,$xb1
  2875. vpxord 0x180($inp),$xc1,$xc1
  2876. vpxord 0x1c0($inp),$xd1,$xd1
  2877. vmovdqu32 $xa1,0x100($out)
  2878. vmovdqu32 $xb1,0x140($out)
  2879. vmovdqu32 $xc1,0x180($out)
  2880. vmovdqu32 $xd1,0x1c0($out)
  2881. vpxord 0x200($inp),$xa2,$xa2
  2882. vpxord 0x240($inp),$xb2,$xb2
  2883. vpxord 0x280($inp),$xc2,$xc2
  2884. vpxord 0x2c0($inp),$xd2,$xd2
  2885. vmovdqu32 $xa2,0x200($out)
  2886. vmovdqu32 $xb2,0x240($out)
  2887. vmovdqu32 $xc2,0x280($out)
  2888. vmovdqu32 $xd2,0x2c0($out)
  2889. vpxord 0x300($inp),$xa3,$xa3
  2890. vpxord 0x340($inp),$xb3,$xb3
  2891. vpxord 0x380($inp),$xc3,$xc3
  2892. vpxord 0x3c0($inp),$xd3,$xd3
  2893. lea 0x400($inp),$inp
  2894. vmovdqu32 $xa3,0x300($out)
  2895. vmovdqu32 $xb3,0x340($out)
  2896. vmovdqu32 $xc3,0x380($out)
  2897. vmovdqu32 $xd3,0x3c0($out)
  2898. lea 0x400($out),$out
  2899. sub \$64*16,$len
  2900. jnz .Loop_outer16x
  2901. jmp .Ldone16x
  2902. .align 32
  2903. .Ltail16x:
  2904. xor %r10,%r10
  2905. sub $inp,$out
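# From here on the output pointer is kept as the difference out-inp, so
# the (out,inp) base+index address equals the original out plus however
# many bytes have already been handled; advancing inp by 64 therefore
# moves input and output in lockstep with a single register update.
# Hypothetical values: out=0x2000, inp=0x1000 -> difference 0x1000;
# after one block inp=0x1040 and the store address is 0x2040.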
  2906. cmp \$64*1,$len
  2907. jb .Less_than_64_16x
  2908. vpxord ($inp),$xa0,$xa0 # xor with input
  2909. vmovdqu32 $xa0,($out,$inp)
  2910. je .Ldone16x
  2911. vmovdqa32 $xb0,$xa0
  2912. lea 64($inp),$inp
  2913. cmp \$64*2,$len
  2914. jb .Less_than_64_16x
  2915. vpxord ($inp),$xb0,$xb0
  2916. vmovdqu32 $xb0,($out,$inp)
  2917. je .Ldone16x
  2918. vmovdqa32 $xc0,$xa0
  2919. lea 64($inp),$inp
  2920. cmp \$64*3,$len
  2921. jb .Less_than_64_16x
  2922. vpxord ($inp),$xc0,$xc0
  2923. vmovdqu32 $xc0,($out,$inp)
  2924. je .Ldone16x
  2925. vmovdqa32 $xd0,$xa0
  2926. lea 64($inp),$inp
  2927. cmp \$64*4,$len
  2928. jb .Less_than_64_16x
  2929. vpxord ($inp),$xd0,$xd0
  2930. vmovdqu32 $xd0,($out,$inp)
  2931. je .Ldone16x
  2932. vmovdqa32 $xa1,$xa0
  2933. lea 64($inp),$inp
  2934. cmp \$64*5,$len
  2935. jb .Less_than_64_16x
  2936. vpxord ($inp),$xa1,$xa1
  2937. vmovdqu32 $xa1,($out,$inp)
  2938. je .Ldone16x
  2939. vmovdqa32 $xb1,$xa0
  2940. lea 64($inp),$inp
  2941. cmp \$64*6,$len
  2942. jb .Less_than_64_16x
  2943. vpxord ($inp),$xb1,$xb1
  2944. vmovdqu32 $xb1,($out,$inp)
  2945. je .Ldone16x
  2946. vmovdqa32 $xc1,$xa0
  2947. lea 64($inp),$inp
  2948. cmp \$64*7,$len
  2949. jb .Less_than_64_16x
  2950. vpxord ($inp),$xc1,$xc1
  2951. vmovdqu32 $xc1,($out,$inp)
  2952. je .Ldone16x
  2953. vmovdqa32 $xd1,$xa0
  2954. lea 64($inp),$inp
  2955. cmp \$64*8,$len
  2956. jb .Less_than_64_16x
  2957. vpxord ($inp),$xd1,$xd1
  2958. vmovdqu32 $xd1,($out,$inp)
  2959. je .Ldone16x
  2960. vmovdqa32 $xa2,$xa0
  2961. lea 64($inp),$inp
  2962. cmp \$64*9,$len
  2963. jb .Less_than_64_16x
  2964. vpxord ($inp),$xa2,$xa2
  2965. vmovdqu32 $xa2,($out,$inp)
  2966. je .Ldone16x
  2967. vmovdqa32 $xb2,$xa0
  2968. lea 64($inp),$inp
  2969. cmp \$64*10,$len
  2970. jb .Less_than_64_16x
  2971. vpxord ($inp),$xb2,$xb2
  2972. vmovdqu32 $xb2,($out,$inp)
  2973. je .Ldone16x
  2974. vmovdqa32 $xc2,$xa0
  2975. lea 64($inp),$inp
  2976. cmp \$64*11,$len
  2977. jb .Less_than_64_16x
  2978. vpxord ($inp),$xc2,$xc2
  2979. vmovdqu32 $xc2,($out,$inp)
  2980. je .Ldone16x
  2981. vmovdqa32 $xd2,$xa0
  2982. lea 64($inp),$inp
  2983. cmp \$64*12,$len
  2984. jb .Less_than_64_16x
  2985. vpxord ($inp),$xd2,$xd2
  2986. vmovdqu32 $xd2,($out,$inp)
  2987. je .Ldone16x
  2988. vmovdqa32 $xa3,$xa0
  2989. lea 64($inp),$inp
  2990. cmp \$64*13,$len
  2991. jb .Less_than_64_16x
  2992. vpxord ($inp),$xa3,$xa3
  2993. vmovdqu32 $xa3,($out,$inp)
  2994. je .Ldone16x
  2995. vmovdqa32 $xb3,$xa0
  2996. lea 64($inp),$inp
  2997. cmp \$64*14,$len
  2998. jb .Less_than_64_16x
  2999. vpxord ($inp),$xb3,$xb3
  3000. vmovdqu32 $xb3,($out,$inp)
  3001. je .Ldone16x
  3002. vmovdqa32 $xc3,$xa0
  3003. lea 64($inp),$inp
  3004. cmp \$64*15,$len
  3005. jb .Less_than_64_16x
  3006. vpxord ($inp),$xc3,$xc3
  3007. vmovdqu32 $xc3,($out,$inp)
  3008. je .Ldone16x
  3009. vmovdqa32 $xd3,$xa0
  3010. lea 64($inp),$inp
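
# Partial-block path (descriptive note): the remaining keystream block is
# spilled to the 64-byte-aligned scratch area at (%rsp), XORed with the
# input one byte at a time, and the scratch copy is cleared afterwards so
# that no keystream is left behind on the stack.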
.Less_than_64_16x:
	vmovdqa32 $xa0,0x00(%rsp)
	lea ($out,$inp),$out
	and \$63,$len

.Loop_tail16x:
	movzb ($inp,%r10),%eax
	movzb (%rsp,%r10),%ecx
	lea 1(%r10),%r10
	xor %ecx,%eax
	mov %al,-1($out,%r10)
	dec $len
	jnz .Loop_tail16x

	vpxord $xa0,$xa0,$xa0
	vmovdqa32 $xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps -0xa8(%r9),%xmm6
	movaps -0x98(%r9),%xmm7
	movaps -0x88(%r9),%xmm8
	movaps -0x78(%r9),%xmm9
	movaps -0x68(%r9),%xmm10
	movaps -0x58(%r9),%xmm11
	movaps -0x48(%r9),%xmm12
	movaps -0x38(%r9),%xmm13
	movaps -0x28(%r9),%xmm14
	movaps -0x18(%r9),%xmm15
___
$code.=<<___;
	lea (%r9),%rsp
.cfi_def_cfa_register %rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size ChaCha20_16x,.-ChaCha20_16x
___

# switch to %ymm domain
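# ChaCha20_8xvl is the AVX512VL counterpart of the 8x code path: the working
# state lives in %ymm0-15, the "smashed" key material in %ymm16-31, and each
# iteration of the outer loop produces 8 ChaCha20 blocks (512 bytes).
# (Descriptive note based on the register maps below.)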
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15));
@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
@key=map("%ymm$_",(16..31));
($xt0,$xt1,$xt2,$xt3)=@key[0..3];

$code.=<<___;
.type ChaCha20_8xvl,\@function,5
.align 32
ChaCha20_8xvl:
.cfi_startproc
.LChaCha20_8xvl:
	mov %rsp,%r9 # frame register
.cfi_def_cfa_register %r9
	sub \$64+$xframe,%rsp
	and \$-64,%rsp
___
$code.=<<___ if ($win64);
	movaps %xmm6,-0xa8(%r9)
	movaps %xmm7,-0x98(%r9)
	movaps %xmm8,-0x88(%r9)
	movaps %xmm9,-0x78(%r9)
	movaps %xmm10,-0x68(%r9)
	movaps %xmm11,-0x58(%r9)
	movaps %xmm12,-0x48(%r9)
	movaps %xmm13,-0x38(%r9)
	movaps %xmm14,-0x28(%r9)
	movaps %xmm15,-0x18(%r9)
.L8xvl_body:
___
$code.=<<___;
	vzeroupper

	lea .Lsigma(%rip),%r10
	vbroadcasti128 (%r10),$xa3 # key[0]
	vbroadcasti128 ($key),$xb3 # key[1]
	vbroadcasti128 16($key),$xc3 # key[2]
	vbroadcasti128 ($counter),$xd3 # key[3]

	vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
	vpshufd \$0x55,$xa3,$xa1
	vpshufd \$0xaa,$xa3,$xa2
	vpshufd \$0xff,$xa3,$xa3
	vmovdqa64 $xa0,@key[0]
	vmovdqa64 $xa1,@key[1]
	vmovdqa64 $xa2,@key[2]
	vmovdqa64 $xa3,@key[3]

	vpshufd \$0x00,$xb3,$xb0
	vpshufd \$0x55,$xb3,$xb1
	vpshufd \$0xaa,$xb3,$xb2
	vpshufd \$0xff,$xb3,$xb3
	vmovdqa64 $xb0,@key[4]
	vmovdqa64 $xb1,@key[5]
	vmovdqa64 $xb2,@key[6]
	vmovdqa64 $xb3,@key[7]

	vpshufd \$0x00,$xc3,$xc0
	vpshufd \$0x55,$xc3,$xc1
	vpshufd \$0xaa,$xc3,$xc2
	vpshufd \$0xff,$xc3,$xc3
	vmovdqa64 $xc0,@key[8]
	vmovdqa64 $xc1,@key[9]
	vmovdqa64 $xc2,@key[10]
	vmovdqa64 $xc3,@key[11]

	vpshufd \$0x00,$xd3,$xd0
	vpshufd \$0x55,$xd3,$xd1
	vpshufd \$0xaa,$xd3,$xd2
	vpshufd \$0xff,$xd3,$xd3
	vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
	vmovdqa64 $xd0,@key[12]
	vmovdqa64 $xd1,@key[13]
	vmovdqa64 $xd2,@key[14]
	vmovdqa64 $xd3,@key[15]

	mov \$10,%eax
	jmp .Loop8xvl

.align 32
.Loop_outer8xvl:
	#vpbroadcastd 0(%r10),$xa0 # reload key
	#vpbroadcastd 4(%r10),$xa1
	vpbroadcastd 8(%r10),$xa2
	vpbroadcastd 12(%r10),$xa3
	vpaddd .Leight(%rip),@key[12],@key[12] # next SIMD counters
	vmovdqa64 @key[4],$xb0
	vmovdqa64 @key[5],$xb1
	vmovdqa64 @key[6],$xb2
	vmovdqa64 @key[7],$xb3
	vmovdqa64 @key[8],$xc0
	vmovdqa64 @key[9],$xc1
	vmovdqa64 @key[10],$xc2
	vmovdqa64 @key[11],$xc3
	vmovdqa64 @key[12],$xd0
	vmovdqa64 @key[13],$xd1
	vmovdqa64 @key[14],$xd2
	vmovdqa64 @key[15],$xd3
	vmovdqa64 $xa0,@key[0]
	vmovdqa64 $xa1,@key[1]
	vmovdqa64 $xa2,@key[2]
	vmovdqa64 $xa3,@key[3]

	mov \$10,%eax
	jmp .Loop8xvl

.align 32
.Loop8xvl:
___
foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
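# The two AVX512_lane_ROUND passes above emit one column round (0,4,8,12)
# followed by one diagonal round (0,5,10,15); .Loop8xvl executes that body
# ten times (see "mov \$10,%eax"), i.e. the usual 20 ChaCha20 rounds.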
$code.=<<___;
	dec %eax
	jnz .Loop8xvl

	vpaddd @key[0],$xa0,$xa0 # accumulate key
	vpaddd @key[1],$xa1,$xa1
	vpaddd @key[2],$xa2,$xa2
	vpaddd @key[3],$xa3,$xa3
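
	# Note: the vpunpck{l,h}{dq,qdq} sequences below perform a 4x4 transpose
	# of 32-bit words within each 128-bit lane; the vshufi32x4/vperm2i128
	# steps that follow regroup the 128-bit lanes so that stores from the
	# resulting registers cover the output contiguously.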
	vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
	vpunpckldq $xa3,$xa2,$xt3
	vpunpckhdq $xa1,$xa0,$xa0
	vpunpckhdq $xa3,$xa2,$xa2
	vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
	vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
	vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd @key[4],$xb0,$xb0
	vpaddd @key[5],$xb1,$xb1
	vpaddd @key[6],$xb2,$xb2
	vpaddd @key[7],$xb3,$xb3
	vpunpckldq $xb1,$xb0,$xt2
	vpunpckldq $xb3,$xb2,$xt3
	vpunpckhdq $xb1,$xb0,$xb0
	vpunpckhdq $xb3,$xb2,$xb2
	vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
	vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
	vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4 \$0,$xb0,$xa0,$xt3 # "de-interlace" further
	vshufi32x4 \$3,$xb0,$xa0,$xb0
	vshufi32x4 \$0,$xb1,$xa1,$xa0
	vshufi32x4 \$3,$xb1,$xa1,$xb1
	vshufi32x4 \$0,$xb2,$xa2,$xa1
	vshufi32x4 \$3,$xb2,$xa2,$xb2
	vshufi32x4 \$0,$xb3,$xa3,$xa2
	vshufi32x4 \$3,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd @key[8],$xc0,$xc0
	vpaddd @key[9],$xc1,$xc1
	vpaddd @key[10],$xc2,$xc2
	vpaddd @key[11],$xc3,$xc3
	vpunpckldq $xc1,$xc0,$xt2
	vpunpckldq $xc3,$xc2,$xt3
	vpunpckhdq $xc1,$xc0,$xc0
	vpunpckhdq $xc3,$xc2,$xc2
	vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
	vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
	vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd @key[12],$xd0,$xd0
	vpaddd @key[13],$xd1,$xd1
	vpaddd @key[14],$xd2,$xd2
	vpaddd @key[15],$xd3,$xd3
	vpunpckldq $xd1,$xd0,$xt2
	vpunpckldq $xd3,$xd2,$xt3
	vpunpckhdq $xd1,$xd0,$xd0
	vpunpckhdq $xd3,$xd2,$xd2
	vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
	vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
	vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
	vperm2i128 \$0x31,$xd0,$xc0,$xd0
	vperm2i128 \$0x20,$xd1,$xc1,$xc0
	vperm2i128 \$0x31,$xd1,$xc1,$xd1
	vperm2i128 \$0x20,$xd2,$xc2,$xc1
	vperm2i128 \$0x31,$xd2,$xc2,$xd2
	vperm2i128 \$0x20,$xd3,$xc3,$xc2
	vperm2i128 \$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
$code.=<<___;
	cmp \$64*8,$len
	jb .Ltail8xvl

	mov \$0x80,%eax # size optimization
	vpxord 0x00($inp),$xa0,$xa0 # xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	lea ($inp,%rax),$inp # size optimization
	vmovdqu32 $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	lea ($out,%rax),$out # size optimization

	vpxor 0x00($inp),$xa1,$xa1
	vpxor 0x20($inp),$xb1,$xb1
	vpxor 0x40($inp),$xc1,$xc1
	vpxor 0x60($inp),$xd1,$xd1
	lea ($inp,%rax),$inp # size optimization
	vmovdqu $xa1,0x00($out)
	vmovdqu $xb1,0x20($out)
	vmovdqu $xc1,0x40($out)
	vmovdqu $xd1,0x60($out)
	lea ($out,%rax),$out # size optimization

	vpxord 0x00($inp),$xa2,$xa2
	vpxor 0x20($inp),$xb2,$xb2
	vpxor 0x40($inp),$xc2,$xc2
	vpxor 0x60($inp),$xd2,$xd2
	lea ($inp,%rax),$inp # size optimization
	vmovdqu32 $xa2,0x00($out)
	vmovdqu $xb2,0x20($out)
	vmovdqu $xc2,0x40($out)
	vmovdqu $xd2,0x60($out)
	lea ($out,%rax),$out # size optimization

	vpxor 0x00($inp),$xa3,$xa3
	vpxor 0x20($inp),$xb3,$xb3
	vpxor 0x40($inp),$xc3,$xc3
	vpxor 0x60($inp),$xd3,$xd3
	lea ($inp,%rax),$inp # size optimization
	vmovdqu $xa3,0x00($out)
	vmovdqu $xb3,0x20($out)
	vmovdqu $xc3,0x40($out)
	vmovdqu $xd3,0x60($out)
	lea ($out,%rax),$out # size optimization

	vpbroadcastd 0(%r10),%ymm0 # reload key
	vpbroadcastd 4(%r10),%ymm1

	sub \$64*8,$len
	jnz .Loop_outer8xvl

	jmp .Ldone8xvl
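
# Tail handling (descriptive note): after the register renames above, the
# first result register is an EVEX-only one (%ymm16+), so it is copied to
# %ymm8 first to allow the shorter VEX encodings used below; otherwise this
# mirrors the 16x tail, except that each 64-byte block spans a pair of
# %ymm registers.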
.align 32
.Ltail8xvl:
	vmovdqa64 $xa0,%ymm8 # size optimization
___
$xa0 = "%ymm8";
$code.=<<___;
	xor %r10,%r10
	sub $inp,$out
	cmp \$64*1,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xa0,$xa0 # xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vmovdqu $xa0,0x00($out,$inp)
	vmovdqu $xb0,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xc0,$xa0
	vmovdqa $xd0,$xb0
	lea 64($inp),$inp

	cmp \$64*2,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xc0,$xc0
	vpxor 0x20($inp),$xd0,$xd0
	vmovdqu $xc0,0x00($out,$inp)
	vmovdqu $xd0,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xa1,$xa0
	vmovdqa $xb1,$xb0
	lea 64($inp),$inp

	cmp \$64*3,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xa1,$xa1
	vpxor 0x20($inp),$xb1,$xb1
	vmovdqu $xa1,0x00($out,$inp)
	vmovdqu $xb1,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xc1,$xa0
	vmovdqa $xd1,$xb0
	lea 64($inp),$inp

	cmp \$64*4,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xc1,$xc1
	vpxor 0x20($inp),$xd1,$xd1
	vmovdqu $xc1,0x00($out,$inp)
	vmovdqu $xd1,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa32 $xa2,$xa0
	vmovdqa $xb2,$xb0
	lea 64($inp),$inp

	cmp \$64*5,$len
	jb .Less_than_64_8xvl
	vpxord 0x00($inp),$xa2,$xa2
	vpxor 0x20($inp),$xb2,$xb2
	vmovdqu32 $xa2,0x00($out,$inp)
	vmovdqu $xb2,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xc2,$xa0
	vmovdqa $xd2,$xb0
	lea 64($inp),$inp

	cmp \$64*6,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xc2,$xc2
	vpxor 0x20($inp),$xd2,$xd2
	vmovdqu $xc2,0x00($out,$inp)
	vmovdqu $xd2,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xa3,$xa0
	vmovdqa $xb3,$xb0
	lea 64($inp),$inp

	cmp \$64*7,$len
	jb .Less_than_64_8xvl
	vpxor 0x00($inp),$xa3,$xa3
	vpxor 0x20($inp),$xb3,$xb3
	vmovdqu $xa3,0x00($out,$inp)
	vmovdqu $xb3,0x20($out,$inp)
	je .Ldone8xvl
	vmovdqa $xc3,$xa0
	vmovdqa $xd3,$xb0
	lea 64($inp),$inp

.Less_than_64_8xvl:
	vmovdqa $xa0,0x00(%rsp)
	vmovdqa $xb0,0x20(%rsp)
	lea ($out,$inp),$out
	and \$63,$len

.Loop_tail8xvl:
	movzb ($inp,%r10),%eax
	movzb (%rsp,%r10),%ecx
	lea 1(%r10),%r10
	xor %ecx,%eax
	mov %al,-1($out,%r10)
	dec $len
	jnz .Loop_tail8xvl

	vpxor $xa0,$xa0,$xa0
	vmovdqa $xa0,0x00(%rsp)
	vmovdqa $xa0,0x20(%rsp)

.Ldone8xvl:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps -0xa8(%r9),%xmm6
	movaps -0x98(%r9),%xmm7
	movaps -0x88(%r9),%xmm8
	movaps -0x78(%r9),%xmm9
	movaps -0x68(%r9),%xmm10
	movaps -0x58(%r9),%xmm11
	movaps -0x48(%r9),%xmm12
	movaps -0x38(%r9),%xmm13
	movaps -0x28(%r9),%xmm14
	movaps -0x18(%r9),%xmm15
___
$code.=<<___;
	lea (%r9),%rsp
.cfi_def_cfa_register %rsp
.L8xvl_epilogue:
	ret
.cfi_endproc
.size ChaCha20_8xvl,.-ChaCha20_8xvl
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
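#
# se_handler covers the integer-only ChaCha20_ctr32 path and restores the
# general-purpose registers saved in its prologue; simd_handler covers the
# SIMD paths and copies the %xmm6-15 save area (located via HandlerData[])
# back into the CONTEXT record.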
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
	push %rsi
	push %rdi
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	pushfq
	sub \$64,%rsp

	mov 120($context),%rax # pull context->Rax
	mov 248($context),%rbx # pull context->Rip

	mov 8($disp),%rsi # disp->ImageBase
	mov 56($disp),%r11 # disp->HandlerData

	lea .Lctr32_body(%rip),%r10
	cmp %r10,%rbx # context->Rip<.Lprologue
	jb .Lcommon_seh_tail

	mov 152($context),%rax # pull context->Rsp

	lea .Lno_data(%rip),%r10 # epilogue label
	cmp %r10,%rbx # context->Rip>=.Lepilogue
	jae .Lcommon_seh_tail

	lea 64+24+48(%rax),%rax

	mov -8(%rax),%rbx
	mov -16(%rax),%rbp
	mov -24(%rax),%r12
	mov -32(%rax),%r13
	mov -40(%rax),%r14
	mov -48(%rax),%r15
	mov %rbx,144($context) # restore context->Rbx
	mov %rbp,160($context) # restore context->Rbp
	mov %r12,216($context) # restore context->R12
	mov %r13,224($context) # restore context->R13
	mov %r14,232($context) # restore context->R14
	mov %r15,240($context) # restore context->R15
.Lcommon_seh_tail:
	mov 8(%rax),%rdi
	mov 16(%rax),%rsi
	mov %rax,152($context) # restore context->Rsp
	mov %rsi,168($context) # restore context->Rsi
	mov %rdi,176($context) # restore context->Rdi

	mov 40($disp),%rdi # disp->ContextRecord
	mov $context,%rsi # context
	mov \$154,%ecx # sizeof(CONTEXT)
	.long 0xa548f3fc # cld; rep movsq

	mov $disp,%rsi
	xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
	mov 8(%rsi),%rdx # arg2, disp->ImageBase
	mov 0(%rsi),%r8 # arg3, disp->ControlPc
	mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
	mov 40(%rsi),%r10 # disp->ContextRecord
	lea 56(%rsi),%r11 # &disp->HandlerData
	lea 24(%rsi),%r12 # &disp->EstablisherFrame
	mov %r10,32(%rsp) # arg5
	mov %r11,40(%rsp) # arg6
	mov %r12,48(%rsp) # arg7
	mov %rcx,56(%rsp) # arg8, (NULL)
	call *__imp_RtlVirtualUnwind(%rip)

	mov \$1,%eax # ExceptionContinueSearch
	add \$64,%rsp
	popfq
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
	pop %rdi
	pop %rsi
	ret
.size se_handler,.-se_handler

.type simd_handler,\@abi-omnipotent
.align 16
simd_handler:
	push %rsi
	push %rdi
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	pushfq
	sub \$64,%rsp

	mov 120($context),%rax # pull context->Rax
	mov 248($context),%rbx # pull context->Rip

	mov 8($disp),%rsi # disp->ImageBase
	mov 56($disp),%r11 # disp->HandlerData

	mov 0(%r11),%r10d # HandlerData[0]
	lea (%rsi,%r10),%r10 # prologue label
	cmp %r10,%rbx # context->Rip<prologue label
	jb .Lcommon_seh_tail

	mov 192($context),%rax # pull context->R9

	mov 4(%r11),%r10d # HandlerData[1]
	mov 8(%r11),%ecx # HandlerData[2]
	lea (%rsi,%r10),%r10 # epilogue label
	cmp %r10,%rbx # context->Rip>=epilogue label
	jae .Lcommon_seh_tail

	neg %rcx
	lea -8(%rax,%rcx),%rsi
	lea 512($context),%rdi # &context.Xmm6
	neg %ecx
	shr \$3,%ecx
	.long 0xa548f3fc # cld; rep movsq

	jmp .Lcommon_seh_tail
.size simd_handler,.-simd_handler
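
# Win64 unwind tables (descriptive note): each .pdata triplet maps a
# function's code range to one of the .xdata entries below; each .xdata
# entry names the language-specific handler and its HandlerData[] (the
# prologue and epilogue labels plus the size of the XMM save area).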
.section .pdata
.align 4
	.rva .LSEH_begin_ChaCha20_ctr32
	.rva .LSEH_end_ChaCha20_ctr32
	.rva .LSEH_info_ChaCha20_ctr32

	.rva .LSEH_begin_ChaCha20_ssse3
	.rva .LSEH_end_ChaCha20_ssse3
	.rva .LSEH_info_ChaCha20_ssse3

	.rva .LSEH_begin_ChaCha20_128
	.rva .LSEH_end_ChaCha20_128
	.rva .LSEH_info_ChaCha20_128

	.rva .LSEH_begin_ChaCha20_4x
	.rva .LSEH_end_ChaCha20_4x
	.rva .LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva .LSEH_begin_ChaCha20_4xop
	.rva .LSEH_end_ChaCha20_4xop
	.rva .LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva .LSEH_begin_ChaCha20_8x
	.rva .LSEH_end_ChaCha20_8x
	.rva .LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva .LSEH_begin_ChaCha20_avx512
	.rva .LSEH_end_ChaCha20_avx512
	.rva .LSEH_info_ChaCha20_avx512

	.rva .LSEH_begin_ChaCha20_avx512vl
	.rva .LSEH_end_ChaCha20_avx512vl
	.rva .LSEH_info_ChaCha20_avx512vl

	.rva .LSEH_begin_ChaCha20_16x
	.rva .LSEH_end_ChaCha20_16x
	.rva .LSEH_info_ChaCha20_16x

	.rva .LSEH_begin_ChaCha20_8xvl
	.rva .LSEH_end_ChaCha20_8xvl
	.rva .LSEH_info_ChaCha20_8xvl
___
$code.=<<___;
.section .xdata
.align 8
.LSEH_info_ChaCha20_ctr32:
	.byte 9,0,0,0
	.rva se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .Lssse3_body,.Lssse3_epilogue
	.long 0x20,0

.LSEH_info_ChaCha20_128:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L128_body,.L128_epilogue
	.long 0x60,0

.LSEH_info_ChaCha20_4x:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L4x_body,.L4x_epilogue
	.long 0xa0,0
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L4xop_body,.L4xop_epilogue # HandlerData[]
	.long 0xa0,0
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L8x_body,.L8x_epilogue # HandlerData[]
	.long 0xa0,0
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .Lavx512_body,.Lavx512_epilogue # HandlerData[]
	.long 0x20,0

.LSEH_info_ChaCha20_avx512vl:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .Lavx512vl_body,.Lavx512vl_epilogue # HandlerData[]
	.long 0x20,0

.LSEH_info_ChaCha20_16x:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L16x_body,.L16x_epilogue # HandlerData[]
	.long 0xa0,0

.LSEH_info_ChaCha20_8xvl:
	.byte 9,0,0,0
	.rva simd_handler
	.rva .L8xvl_body,.L8xvl_epilogue # HandlerData[]
	.long 0xa0,0
___
}
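
# Final output pass: evaluate `...` expressions embedded in the accumulated
# code and "down-shift" %x#%ymmN/%x#%zmmN register references to their %xmm
# form before printing the assembly to STDOUT.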
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;
	s/%x#%[yz]/%x/g; # "down-shift"

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";