chacha-x86_64.pl
  1. #! /usr/bin/env perl
  2. # Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License 2.0 (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # November 2014
  17. #
  18. # ChaCha20 for x86_64.
  19. #
  20. # December 2016
  21. #
  22. # Add AVX512F code path.
  23. #
  24. # December 2017
  25. #
  26. # Add AVX512VL code path.
  27. #
  28. # Performance in cycles per byte out of large buffer.
  29. #
  30. #                 IALU/gcc 4.8(i)  1x/2xSSSE3(ii)  4xSSSE3     NxAVX(v)
  31. #
  32. # P4              9.48/+99%        -               -
  33. # Core2           7.83/+55%        7.90/5.76       4.35
  34. # Westmere        7.19/+50%        5.60/4.50       3.00
  35. # Sandy Bridge    8.31/+42%        5.45/4.00       2.72
  36. # Ivy Bridge      6.71/+46%        5.40/?          2.41
  37. # Haswell         5.92/+43%        5.20/3.45       2.42        1.23
  38. # Skylake[-X]     5.87/+39%        4.70/3.22       2.31        1.19[0.80(vi)]
  39. # Silvermont      12.0/+33%        7.75/6.90       7.03(iii)
  40. # Knights L       11.7/-           ?               9.60(iii)   0.80
  41. # Goldmont        10.6/+17%        5.10/3.52       3.28
  42. # Sledgehammer    7.28/+52%        -               -
  43. # Bulldozer       9.66/+28%        9.85/5.35(iv)   3.06(iv)
  44. # Ryzen           5.96/+50%        5.19/3.00       2.40        2.09
  45. # VIA Nano        10.5/+46%        6.72/6.88       6.05
  46. #
  47. # (i) compared to older gcc 3.x one can observe >2x improvement on
  48. # most platforms;
  49. # (ii) 2xSSSE3 is a code path optimized specifically for the 128-byte
  50. # case used by chacha20_poly1305_tls_cipher; results are EVP-free;
  51. # (iii) this is not an optimal result for Atom because of MSROM
  52. # limitations; SSE2 can do better, but the gain is considered too
  53. # low to justify the [maintenance] effort;
  54. # (iv) Bulldozer actually executes the 4xXOP code path, which delivers
  55. # 2.20 and 4.85 for 128-byte inputs;
  56. # (v) 8xAVX2, 8xAVX512VL or 16xAVX512F, whichever is best applicable;
  57. # (vi) even though Skylake-X can execute AVX512F code and deliver 0.57
  58. # cpb in single thread, the corresponding capability is suppressed;
  59. # $output is the last argument if it looks like a file (it has an extension)
  60. # $flavour is the first argument if it doesn't look like a file
  61. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  62. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
  63. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  64. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  65. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  66. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  67. die "can't locate x86_64-xlate.pl";
  68. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  69. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  70. $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
  71. }
  72. if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  73. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
  74. $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
  75. $avx += 1 if ($1==2.11 && $2>=8);
  76. }
  77. if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  78. `ml64 2>&1` =~ /Version ([0-9]+)\./) {
  79. $avx = ($1>=10) + ($1>=11);
  80. }
  81. if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
  82. $avx = ($2>=3.0) + ($2>3.0);
  83. }
  84. open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
  85. or die "can't call $xlate: $!";
  86. *STDOUT=*OUT;
  87. # input parameter block
  88. ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
  89. $code.=<<___;
  90. .text
  91. .extern OPENSSL_ia32cap_P
  92. .align 64
  93. .Lzero:
  94. .long 0,0,0,0
  95. .Lone:
  96. .long 1,0,0,0
  97. .Linc:
  98. .long 0,1,2,3
  99. .Lfour:
  100. .long 4,4,4,4
  101. .Lincy:
  102. .long 0,2,4,6,1,3,5,7
  103. .Leight:
  104. .long 8,8,8,8,8,8,8,8
  105. .Lrot16:
  106. .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
  107. .Lrot24:
  108. .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
  109. .Ltwoy:
  110. .long 2,0,0,0, 2,0,0,0
  111. .align 64
  112. .Lzeroz:
  113. .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
  114. .Lfourz:
  115. .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
  116. .Lincz:
  117. .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
  118. .Lsixteen:
  119. .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
  120. .Lsigma:
  121. .asciz "expand 32-byte k"
  122. .asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
  123. ___
  124. sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
  125. { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  126. my $arg = pop;
  127. $arg = "\$$arg" if ($arg*1 eq $arg);
  128. $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
  129. }
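# To illustrate the thunk above (a hedged example; this exact call does not
# appear verbatim below): evaluating &rol("%r12d",16) appends
# "\trol\t\$16,%r12d\n" to $code -- a numeric last argument is turned into
# an immediate, and operands are emitted in reverse order so the destination
# comes last (AT&T style).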
  130. @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
  131. "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
  132. @t=("%esi","%edi");
  133. sub ROUND { # critical path is 24 cycles per round
  134. my ($a0,$b0,$c0,$d0)=@_;
  135. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  136. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  137. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  138. my ($xc,$xc_)=map("\"$_\"",@t);
  139. my @x=map("\"$_\"",@x);
  140. # Consider the order in which variables are addressed by their
  141. # index:
  142. #
  143. # a b c d
  144. #
  145. # 0 4 8 12 < even round
  146. # 1 5 9 13
  147. # 2 6 10 14
  148. # 3 7 11 15
  149. # 0 5 10 15 < odd round
  150. # 1 6 11 12
  151. # 2 7 8 13
  152. # 3 4 9 14
  153. #
  154. # 'a', 'b' and 'd's are permanently allocated in registers,
  155. # @x[0..7,12..15], while 'c's are maintained in memory. If
  156. # you observe the 'c' column, you'll notice that a pair of 'c's
  157. # is invariant between rounds. This means that we have to reload
  158. # them once per round, in the middle. This is why you'll see a
  159. # bunch of 'c' stores and loads in the middle, but none at
  160. # the beginning or end.
  161. # Normally instructions would be interleaved to favour in-order
  162. # execution. Generally out-of-order cores manage it gracefully,
  163. # but not this time for some reason. As in-order execution
  164. # cores are a dying breed and old Atom is the only one around,
  165. # instructions are left uninterleaved. Besides, Atom is better
  166. # off executing 1xSSSE3 code anyway...
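# For reference, each Q1..Q4 group below is one ChaCha20 quarter-round;
# in scalar pseudo-C (a sketch for orientation, not code from this file):
#
#	a += b; d ^= a; d = ROTL32(d,16);
#	c += d; b ^= c; b = ROTL32(b,12);
#	a += b; d ^= a; d = ROTL32(d, 8);
#	c += d; b ^= c; b = ROTL32(b, 7);
#
# Q1/Q2 are processed first because their 'c' words are live in @t; the
# 'c' pair is then swapped through the stack before Q3/Q4.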
  167. (
  168. "&add (@x[$a0],@x[$b0])", # Q1
  169. "&xor (@x[$d0],@x[$a0])",
  170. "&rol (@x[$d0],16)",
  171. "&add (@x[$a1],@x[$b1])", # Q2
  172. "&xor (@x[$d1],@x[$a1])",
  173. "&rol (@x[$d1],16)",
  174. "&add ($xc,@x[$d0])",
  175. "&xor (@x[$b0],$xc)",
  176. "&rol (@x[$b0],12)",
  177. "&add ($xc_,@x[$d1])",
  178. "&xor (@x[$b1],$xc_)",
  179. "&rol (@x[$b1],12)",
  180. "&add (@x[$a0],@x[$b0])",
  181. "&xor (@x[$d0],@x[$a0])",
  182. "&rol (@x[$d0],8)",
  183. "&add (@x[$a1],@x[$b1])",
  184. "&xor (@x[$d1],@x[$a1])",
  185. "&rol (@x[$d1],8)",
  186. "&add ($xc,@x[$d0])",
  187. "&xor (@x[$b0],$xc)",
  188. "&rol (@x[$b0],7)",
  189. "&add ($xc_,@x[$d1])",
  190. "&xor (@x[$b1],$xc_)",
  191. "&rol (@x[$b1],7)",
  192. "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
  193. "&mov (\"4*$c1(%rsp)\",$xc_)",
  194. "&mov ($xc,\"4*$c2(%rsp)\")",
  195. "&mov ($xc_,\"4*$c3(%rsp)\")",
  196. "&add (@x[$a2],@x[$b2])", # Q3
  197. "&xor (@x[$d2],@x[$a2])",
  198. "&rol (@x[$d2],16)",
  199. "&add (@x[$a3],@x[$b3])", # Q4
  200. "&xor (@x[$d3],@x[$a3])",
  201. "&rol (@x[$d3],16)",
  202. "&add ($xc,@x[$d2])",
  203. "&xor (@x[$b2],$xc)",
  204. "&rol (@x[$b2],12)",
  205. "&add ($xc_,@x[$d3])",
  206. "&xor (@x[$b3],$xc_)",
  207. "&rol (@x[$b3],12)",
  208. "&add (@x[$a2],@x[$b2])",
  209. "&xor (@x[$d2],@x[$a2])",
  210. "&rol (@x[$d2],8)",
  211. "&add (@x[$a3],@x[$b3])",
  212. "&xor (@x[$d3],@x[$a3])",
  213. "&rol (@x[$d3],8)",
  214. "&add ($xc,@x[$d2])",
  215. "&xor (@x[$b2],$xc)",
  216. "&rol (@x[$b2],7)",
  217. "&add ($xc_,@x[$d3])",
  218. "&xor (@x[$b3],$xc_)",
  219. "&rol (@x[$b3],7)"
  220. );
  221. }
  222. ########################################################################
  223. # Generic code path that handles all lengths on pre-SSSE3 processors.
  224. $code.=<<___;
  225. .globl ChaCha20_ctr32
  226. .type ChaCha20_ctr32,\@function,5
  227. .align 64
  228. ChaCha20_ctr32:
  229. .cfi_startproc
  230. cmp \$0,$len
  231. je .Lno_data
  232. mov OPENSSL_ia32cap_P+4(%rip),%r10
  233. ___
  234. $code.=<<___ if ($avx>2);
  235. bt \$48,%r10 # check for AVX512F
  236. jc .LChaCha20_avx512
  237. test %r10,%r10 # check for AVX512VL
  238. js .LChaCha20_avx512vl
  239. ___
  240. $code.=<<___;
  241. test \$`1<<(41-32)`,%r10d
  242. jnz .LChaCha20_ssse3
  243. push %rbx
  244. .cfi_push %rbx
  245. push %rbp
  246. .cfi_push %rbp
  247. push %r12
  248. .cfi_push %r12
  249. push %r13
  250. .cfi_push %r13
  251. push %r14
  252. .cfi_push %r14
  253. push %r15
  254. .cfi_push %r15
  255. sub \$64+24,%rsp
  256. .cfi_adjust_cfa_offset 64+24
  257. .Lctr32_body:
  258. #movdqa .Lsigma(%rip),%xmm0
  259. movdqu ($key),%xmm1
  260. movdqu 16($key),%xmm2
  261. movdqu ($counter),%xmm3
  262. movdqa .Lone(%rip),%xmm4
  263. #movdqa %xmm0,4*0(%rsp) # key[0]
  264. movdqa %xmm1,4*4(%rsp) # key[1]
  265. movdqa %xmm2,4*8(%rsp) # key[2]
  266. movdqa %xmm3,4*12(%rsp) # key[3]
  267. mov $len,%rbp # reassign $len
  268. jmp .Loop_outer
  269. .align 32
  270. .Loop_outer:
  271. mov \$0x61707865,@x[0] # 'expa'
  272. mov \$0x3320646e,@x[1] # 'nd 3'
  273. mov \$0x79622d32,@x[2] # '2-by'
  274. mov \$0x6b206574,@x[3] # 'te k'
  275. mov 4*4(%rsp),@x[4]
  276. mov 4*5(%rsp),@x[5]
  277. mov 4*6(%rsp),@x[6]
  278. mov 4*7(%rsp),@x[7]
  279. movd %xmm3,@x[12]
  280. mov 4*13(%rsp),@x[13]
  281. mov 4*14(%rsp),@x[14]
  282. mov 4*15(%rsp),@x[15]
  283. mov %rbp,64+0(%rsp) # save len
  284. mov \$10,%ebp
  285. mov $inp,64+8(%rsp) # save inp
  286. movq %xmm2,%rsi # "@x[8]"
  287. mov $out,64+16(%rsp) # save out
  288. mov %rsi,%rdi
  289. shr \$32,%rdi # "@x[9]"
  290. jmp .Loop
  291. .align 32
  292. .Loop:
  293. ___
  294. foreach (&ROUND (0, 4, 8,12)) { eval; }
  295. foreach (&ROUND (0, 5,10,15)) { eval; }
  296. &dec ("%ebp");
  297. &jnz (".Loop");
  298. $code.=<<___;
  299. mov @t[1],4*9(%rsp) # modulo-scheduled
  300. mov @t[0],4*8(%rsp)
  301. mov 64(%rsp),%rbp # load len
  302. movdqa %xmm2,%xmm1
  303. mov 64+8(%rsp),$inp # load inp
  304. paddd %xmm4,%xmm3 # increment counter
  305. mov 64+16(%rsp),$out # load out
  306. add \$0x61707865,@x[0] # 'expa'
  307. add \$0x3320646e,@x[1] # 'nd 3'
  308. add \$0x79622d32,@x[2] # '2-by'
  309. add \$0x6b206574,@x[3] # 'te k'
  310. add 4*4(%rsp),@x[4]
  311. add 4*5(%rsp),@x[5]
  312. add 4*6(%rsp),@x[6]
  313. add 4*7(%rsp),@x[7]
  314. add 4*12(%rsp),@x[12]
  315. add 4*13(%rsp),@x[13]
  316. add 4*14(%rsp),@x[14]
  317. add 4*15(%rsp),@x[15]
  318. paddd 4*8(%rsp),%xmm1
  319. cmp \$64,%rbp
  320. jb .Ltail
  321. xor 4*0($inp),@x[0] # xor with input
  322. xor 4*1($inp),@x[1]
  323. xor 4*2($inp),@x[2]
  324. xor 4*3($inp),@x[3]
  325. xor 4*4($inp),@x[4]
  326. xor 4*5($inp),@x[5]
  327. xor 4*6($inp),@x[6]
  328. xor 4*7($inp),@x[7]
  329. movdqu 4*8($inp),%xmm0
  330. xor 4*12($inp),@x[12]
  331. xor 4*13($inp),@x[13]
  332. xor 4*14($inp),@x[14]
  333. xor 4*15($inp),@x[15]
  334. lea 4*16($inp),$inp # inp+=64
  335. pxor %xmm1,%xmm0
  336. movdqa %xmm2,4*8(%rsp)
  337. movd %xmm3,4*12(%rsp)
  338. mov @x[0],4*0($out) # write output
  339. mov @x[1],4*1($out)
  340. mov @x[2],4*2($out)
  341. mov @x[3],4*3($out)
  342. mov @x[4],4*4($out)
  343. mov @x[5],4*5($out)
  344. mov @x[6],4*6($out)
  345. mov @x[7],4*7($out)
  346. movdqu %xmm0,4*8($out)
  347. mov @x[12],4*12($out)
  348. mov @x[13],4*13($out)
  349. mov @x[14],4*14($out)
  350. mov @x[15],4*15($out)
  351. lea 4*16($out),$out # out+=64
  352. sub \$64,%rbp
  353. jnz .Loop_outer
  354. jmp .Ldone
  355. .align 16
  356. .Ltail:
  357. mov @x[0],4*0(%rsp)
  358. mov @x[1],4*1(%rsp)
  359. xor %rbx,%rbx
  360. mov @x[2],4*2(%rsp)
  361. mov @x[3],4*3(%rsp)
  362. mov @x[4],4*4(%rsp)
  363. mov @x[5],4*5(%rsp)
  364. mov @x[6],4*6(%rsp)
  365. mov @x[7],4*7(%rsp)
  366. movdqa %xmm1,4*8(%rsp)
  367. mov @x[12],4*12(%rsp)
  368. mov @x[13],4*13(%rsp)
  369. mov @x[14],4*14(%rsp)
  370. mov @x[15],4*15(%rsp)
  371. .Loop_tail:
  372. movzb ($inp,%rbx),%eax
  373. movzb (%rsp,%rbx),%edx
  374. lea 1(%rbx),%rbx
  375. xor %edx,%eax
  376. mov %al,-1($out,%rbx)
  377. dec %rbp
  378. jnz .Loop_tail
  379. .Ldone:
  380. lea 64+24+48(%rsp),%rsi
  381. .cfi_def_cfa %rsi,8
  382. mov -48(%rsi),%r15
  383. .cfi_restore %r15
  384. mov -40(%rsi),%r14
  385. .cfi_restore %r14
  386. mov -32(%rsi),%r13
  387. .cfi_restore %r13
  388. mov -24(%rsi),%r12
  389. .cfi_restore %r12
  390. mov -16(%rsi),%rbp
  391. .cfi_restore %rbp
  392. mov -8(%rsi),%rbx
  393. .cfi_restore %rbx
  394. lea (%rsi),%rsp
  395. .cfi_def_cfa_register %rsp
  396. .Lno_data:
  397. ret
  398. .cfi_endproc
  399. .size ChaCha20_ctr32,.-ChaCha20_ctr32
  400. ___
  401. ########################################################################
  402. # SSSE3 code path that handles shorter lengths
  403. {
  404. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
  405. sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
  406. &paddd ($a,$b);
  407. &pxor ($d,$a);
  408. &pshufb ($d,$rot16);
  409. &paddd ($c,$d);
  410. &pxor ($b,$c);
  411. &movdqa ($t,$b);
  412. &psrld ($b,20);
  413. &pslld ($t,12);
  414. &por ($b,$t);
  415. &paddd ($a,$b);
  416. &pxor ($d,$a);
  417. &pshufb ($d,$rot24);
  418. &paddd ($c,$d);
  419. &pxor ($b,$c);
  420. &movdqa ($t,$b);
  421. &psrld ($b,25);
  422. &pslld ($t,7);
  423. &por ($b,$t);
  424. }
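# Note on the rotates above: SSE/SSSE3 has no vector rotate instruction.
# The 16- and 8-bit rotates are done with pshufb and the .Lrot16/.Lrot24
# byte-shuffle masks (a rotate by a multiple of 8 bits is just a byte
# permutation), while the 12- and 7-bit rotates are emulated with a
# shift pair plus OR, roughly (a C sketch, not code from this file):
#
#	x = (x << n) | (x >> (32 - n));		/* n = 12 or 7 */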
  425. my $xframe = $win64 ? 160+8 : 8;
  426. $code.=<<___;
  427. .type ChaCha20_ssse3,\@function,5
  428. .align 32
  429. ChaCha20_ssse3:
  430. .cfi_startproc
  431. .LChaCha20_ssse3:
  432. mov %rsp,%r9 # frame pointer
  433. .cfi_def_cfa_register %r9
  434. ___
  435. $code.=<<___ if ($avx);
  436. test \$`1<<(43-32)`,%r10d
  437. jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
  438. ___
  439. $code.=<<___;
  440. cmp \$128,$len # we might throw away some data,
  441. je .LChaCha20_128
  442. ja .LChaCha20_4x # but overall it won't be slower
  443. .Ldo_sse3_after_all:
  444. sub \$64+$xframe,%rsp
  445. ___
  446. $code.=<<___ if ($win64);
  447. movaps %xmm6,-0x28(%r9)
  448. movaps %xmm7,-0x18(%r9)
  449. .Lssse3_body:
  450. ___
  451. $code.=<<___;
  452. movdqa .Lsigma(%rip),$a
  453. movdqu ($key),$b
  454. movdqu 16($key),$c
  455. movdqu ($counter),$d
  456. movdqa .Lrot16(%rip),$rot16
  457. movdqa .Lrot24(%rip),$rot24
  458. movdqa $a,0x00(%rsp)
  459. movdqa $b,0x10(%rsp)
  460. movdqa $c,0x20(%rsp)
  461. movdqa $d,0x30(%rsp)
  462. mov \$10,$counter # reuse $counter
  463. jmp .Loop_ssse3
  464. .align 32
  465. .Loop_outer_ssse3:
  466. movdqa .Lone(%rip),$d
  467. movdqa 0x00(%rsp),$a
  468. movdqa 0x10(%rsp),$b
  469. movdqa 0x20(%rsp),$c
  470. paddd 0x30(%rsp),$d
  471. mov \$10,$counter
  472. movdqa $d,0x30(%rsp)
  473. jmp .Loop_ssse3
  474. .align 32
  475. .Loop_ssse3:
  476. ___
  477. &SSSE3ROUND();
  478. &pshufd ($c,$c,0b01001110);
  479. &pshufd ($b,$b,0b00111001);
  480. &pshufd ($d,$d,0b10010011);
  481. &nop ();
  482. &SSSE3ROUND();
  483. &pshufd ($c,$c,0b01001110);
  484. &pshufd ($b,$b,0b10010011);
  485. &pshufd ($d,$d,0b00111001);
  486. &dec ($counter);
  487. &jnz (".Loop_ssse3");
  488. $code.=<<___;
  489. paddd 0x00(%rsp),$a
  490. paddd 0x10(%rsp),$b
  491. paddd 0x20(%rsp),$c
  492. paddd 0x30(%rsp),$d
  493. cmp \$64,$len
  494. jb .Ltail_ssse3
  495. movdqu 0x00($inp),$t
  496. movdqu 0x10($inp),$t1
  497. pxor $t,$a # xor with input
  498. movdqu 0x20($inp),$t
  499. pxor $t1,$b
  500. movdqu 0x30($inp),$t1
  501. lea 0x40($inp),$inp # inp+=64
  502. pxor $t,$c
  503. pxor $t1,$d
  504. movdqu $a,0x00($out) # write output
  505. movdqu $b,0x10($out)
  506. movdqu $c,0x20($out)
  507. movdqu $d,0x30($out)
  508. lea 0x40($out),$out # out+=64
  509. sub \$64,$len
  510. jnz .Loop_outer_ssse3
  511. jmp .Ldone_ssse3
  512. .align 16
  513. .Ltail_ssse3:
  514. movdqa $a,0x00(%rsp)
  515. movdqa $b,0x10(%rsp)
  516. movdqa $c,0x20(%rsp)
  517. movdqa $d,0x30(%rsp)
  518. xor $counter,$counter
  519. .Loop_tail_ssse3:
  520. movzb ($inp,$counter),%eax
  521. movzb (%rsp,$counter),%ecx
  522. lea 1($counter),$counter
  523. xor %ecx,%eax
  524. mov %al,-1($out,$counter)
  525. dec $len
  526. jnz .Loop_tail_ssse3
  527. .Ldone_ssse3:
  528. ___
  529. $code.=<<___ if ($win64);
  530. movaps -0x28(%r9),%xmm6
  531. movaps -0x18(%r9),%xmm7
  532. ___
  533. $code.=<<___;
  534. lea (%r9),%rsp
  535. .cfi_def_cfa_register %rsp
  536. .Lssse3_epilogue:
  537. ret
  538. .cfi_endproc
  539. .size ChaCha20_ssse3,.-ChaCha20_ssse3
  540. ___
  541. }
  542. ########################################################################
  543. # SSSE3 code path that handles 128-byte inputs
  544. {
  545. my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7));
  546. my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1));
  547. sub SSSE3ROUND_2x {
  548. &paddd ($a,$b);
  549. &pxor ($d,$a);
  550. &paddd ($a1,$b1);
  551. &pxor ($d1,$a1);
  552. &pshufb ($d,$rot16);
  553. &pshufb($d1,$rot16);
  554. &paddd ($c,$d);
  555. &paddd ($c1,$d1);
  556. &pxor ($b,$c);
  557. &pxor ($b1,$c1);
  558. &movdqa ($t,$b);
  559. &psrld ($b,20);
  560. &movdqa($t1,$b1);
  561. &pslld ($t,12);
  562. &psrld ($b1,20);
  563. &por ($b,$t);
  564. &pslld ($t1,12);
  565. &por ($b1,$t1);
  566. &paddd ($a,$b);
  567. &pxor ($d,$a);
  568. &paddd ($a1,$b1);
  569. &pxor ($d1,$a1);
  570. &pshufb ($d,$rot24);
  571. &pshufb($d1,$rot24);
  572. &paddd ($c,$d);
  573. &paddd ($c1,$d1);
  574. &pxor ($b,$c);
  575. &pxor ($b1,$c1);
  576. &movdqa ($t,$b);
  577. &psrld ($b,25);
  578. &movdqa($t1,$b1);
  579. &pslld ($t,7);
  580. &psrld ($b1,25);
  581. &por ($b,$t);
  582. &pslld ($t1,7);
  583. &por ($b1,$t1);
  584. }
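# The _2x variant above interleaves two independent 64-byte blocks in one
# instruction stream (the second block uses $a1/$b1/$c1/$d1 with the
# counter bumped by .Lone), hiding SIMD latency for the 128-byte case
# mentioned in note (ii) at the top of the file.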
  585. my $xframe = $win64 ? 0x68 : 8;
  586. $code.=<<___;
  587. .type ChaCha20_128,\@function,5
  588. .align 32
  589. ChaCha20_128:
  590. .cfi_startproc
  591. .LChaCha20_128:
  592. mov %rsp,%r9 # frame pointer
  593. .cfi_def_cfa_register %r9
  594. sub \$64+$xframe,%rsp
  595. ___
  596. $code.=<<___ if ($win64);
  597. movaps %xmm6,-0x68(%r9)
  598. movaps %xmm7,-0x58(%r9)
  599. movaps %xmm8,-0x48(%r9)
  600. movaps %xmm9,-0x38(%r9)
  601. movaps %xmm10,-0x28(%r9)
  602. movaps %xmm11,-0x18(%r9)
  603. .L128_body:
  604. ___
  605. $code.=<<___;
  606. movdqa .Lsigma(%rip),$a
  607. movdqu ($key),$b
  608. movdqu 16($key),$c
  609. movdqu ($counter),$d
  610. movdqa .Lone(%rip),$d1
  611. movdqa .Lrot16(%rip),$rot16
  612. movdqa .Lrot24(%rip),$rot24
  613. movdqa $a,$a1
  614. movdqa $a,0x00(%rsp)
  615. movdqa $b,$b1
  616. movdqa $b,0x10(%rsp)
  617. movdqa $c,$c1
  618. movdqa $c,0x20(%rsp)
  619. paddd $d,$d1
  620. movdqa $d,0x30(%rsp)
  621. mov \$10,$counter # reuse $counter
  622. jmp .Loop_128
  623. .align 32
  624. .Loop_128:
  625. ___
  626. &SSSE3ROUND_2x();
  627. &pshufd ($c,$c,0b01001110);
  628. &pshufd ($b,$b,0b00111001);
  629. &pshufd ($d,$d,0b10010011);
  630. &pshufd ($c1,$c1,0b01001110);
  631. &pshufd ($b1,$b1,0b00111001);
  632. &pshufd ($d1,$d1,0b10010011);
  633. &SSSE3ROUND_2x();
  634. &pshufd ($c,$c,0b01001110);
  635. &pshufd ($b,$b,0b10010011);
  636. &pshufd ($d,$d,0b00111001);
  637. &pshufd ($c1,$c1,0b01001110);
  638. &pshufd ($b1,$b1,0b10010011);
  639. &pshufd ($d1,$d1,0b00111001);
  640. &dec ($counter);
  641. &jnz (".Loop_128");
  642. $code.=<<___;
  643. paddd 0x00(%rsp),$a
  644. paddd 0x10(%rsp),$b
  645. paddd 0x20(%rsp),$c
  646. paddd 0x30(%rsp),$d
  647. paddd .Lone(%rip),$d1
  648. paddd 0x00(%rsp),$a1
  649. paddd 0x10(%rsp),$b1
  650. paddd 0x20(%rsp),$c1
  651. paddd 0x30(%rsp),$d1
  652. movdqu 0x00($inp),$t
  653. movdqu 0x10($inp),$t1
  654. pxor $t,$a # xor with input
  655. movdqu 0x20($inp),$t
  656. pxor $t1,$b
  657. movdqu 0x30($inp),$t1
  658. pxor $t,$c
  659. movdqu 0x40($inp),$t
  660. pxor $t1,$d
  661. movdqu 0x50($inp),$t1
  662. pxor $t,$a1
  663. movdqu 0x60($inp),$t
  664. pxor $t1,$b1
  665. movdqu 0x70($inp),$t1
  666. pxor $t,$c1
  667. pxor $t1,$d1
  668. movdqu $a,0x00($out) # write output
  669. movdqu $b,0x10($out)
  670. movdqu $c,0x20($out)
  671. movdqu $d,0x30($out)
  672. movdqu $a1,0x40($out)
  673. movdqu $b1,0x50($out)
  674. movdqu $c1,0x60($out)
  675. movdqu $d1,0x70($out)
  676. ___
  677. $code.=<<___ if ($win64);
  678. movaps -0x68(%r9),%xmm6
  679. movaps -0x58(%r9),%xmm7
  680. movaps -0x48(%r9),%xmm8
  681. movaps -0x38(%r9),%xmm9
  682. movaps -0x28(%r9),%xmm10
  683. movaps -0x18(%r9),%xmm11
  684. ___
  685. $code.=<<___;
  686. lea (%r9),%rsp
  687. .cfi_def_cfa_register %rsp
  688. .L128_epilogue:
  689. ret
  690. .cfi_endproc
  691. .size ChaCha20_128,.-ChaCha20_128
  692. ___
  693. }
  694. ########################################################################
  695. # SSSE3 code path that handles longer messages.
  696. {
  697. # assign variables to favor Atom front-end
  698. my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
  699. $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
  700. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  701. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  702. sub SSSE3_lane_ROUND {
  703. my ($a0,$b0,$c0,$d0)=@_;
  704. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  705. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  706. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  707. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  708. my @x=map("\"$_\"",@xx);
  709. # Consider the order in which variables are addressed by their
  710. # index:
  711. #
  712. # a b c d
  713. #
  714. # 0 4 8 12 < even round
  715. # 1 5 9 13
  716. # 2 6 10 14
  717. # 3 7 11 15
  718. # 0 5 10 15 < odd round
  719. # 1 6 11 12
  720. # 2 7 8 13
  721. # 3 4 9 14
  722. #
  723. # 'a', 'b' and 'd's are permanently allocated in registers,
  724. # @x[0..7,12..15], while 'c's are maintained in memory. If
  725. # you observe the 'c' column, you'll notice that a pair of 'c's
  726. # is invariant between rounds. This means that we have to reload
  727. # them once per round, in the middle. This is why you'll see a
  728. # bunch of 'c' stores and loads in the middle, but none at
  729. # the beginning or end.
  730. (
  731. "&paddd (@x[$a0],@x[$b0])", # Q1
  732. "&paddd (@x[$a1],@x[$b1])", # Q2
  733. "&pxor (@x[$d0],@x[$a0])",
  734. "&pxor (@x[$d1],@x[$a1])",
  735. "&pshufb (@x[$d0],$t1)",
  736. "&pshufb (@x[$d1],$t1)",
  737. "&paddd ($xc,@x[$d0])",
  738. "&paddd ($xc_,@x[$d1])",
  739. "&pxor (@x[$b0],$xc)",
  740. "&pxor (@x[$b1],$xc_)",
  741. "&movdqa ($t0,@x[$b0])",
  742. "&pslld (@x[$b0],12)",
  743. "&psrld ($t0,20)",
  744. "&movdqa ($t1,@x[$b1])",
  745. "&pslld (@x[$b1],12)",
  746. "&por (@x[$b0],$t0)",
  747. "&psrld ($t1,20)",
  748. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  749. "&por (@x[$b1],$t1)",
  750. "&paddd (@x[$a0],@x[$b0])",
  751. "&paddd (@x[$a1],@x[$b1])",
  752. "&pxor (@x[$d0],@x[$a0])",
  753. "&pxor (@x[$d1],@x[$a1])",
  754. "&pshufb (@x[$d0],$t0)",
  755. "&pshufb (@x[$d1],$t0)",
  756. "&paddd ($xc,@x[$d0])",
  757. "&paddd ($xc_,@x[$d1])",
  758. "&pxor (@x[$b0],$xc)",
  759. "&pxor (@x[$b1],$xc_)",
  760. "&movdqa ($t1,@x[$b0])",
  761. "&pslld (@x[$b0],7)",
  762. "&psrld ($t1,25)",
  763. "&movdqa ($t0,@x[$b1])",
  764. "&pslld (@x[$b1],7)",
  765. "&por (@x[$b0],$t1)",
  766. "&psrld ($t0,25)",
  767. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  768. "&por (@x[$b1],$t0)",
  769. "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  770. "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
  771. "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
  772. "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
  773. "&paddd (@x[$a2],@x[$b2])", # Q3
  774. "&paddd (@x[$a3],@x[$b3])", # Q4
  775. "&pxor (@x[$d2],@x[$a2])",
  776. "&pxor (@x[$d3],@x[$a3])",
  777. "&pshufb (@x[$d2],$t1)",
  778. "&pshufb (@x[$d3],$t1)",
  779. "&paddd ($xc,@x[$d2])",
  780. "&paddd ($xc_,@x[$d3])",
  781. "&pxor (@x[$b2],$xc)",
  782. "&pxor (@x[$b3],$xc_)",
  783. "&movdqa ($t0,@x[$b2])",
  784. "&pslld (@x[$b2],12)",
  785. "&psrld ($t0,20)",
  786. "&movdqa ($t1,@x[$b3])",
  787. "&pslld (@x[$b3],12)",
  788. "&por (@x[$b2],$t0)",
  789. "&psrld ($t1,20)",
  790. "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
  791. "&por (@x[$b3],$t1)",
  792. "&paddd (@x[$a2],@x[$b2])",
  793. "&paddd (@x[$a3],@x[$b3])",
  794. "&pxor (@x[$d2],@x[$a2])",
  795. "&pxor (@x[$d3],@x[$a3])",
  796. "&pshufb (@x[$d2],$t0)",
  797. "&pshufb (@x[$d3],$t0)",
  798. "&paddd ($xc,@x[$d2])",
  799. "&paddd ($xc_,@x[$d3])",
  800. "&pxor (@x[$b2],$xc)",
  801. "&pxor (@x[$b3],$xc_)",
  802. "&movdqa ($t1,@x[$b2])",
  803. "&pslld (@x[$b2],7)",
  804. "&psrld ($t1,25)",
  805. "&movdqa ($t0,@x[$b3])",
  806. "&pslld (@x[$b3],7)",
  807. "&por (@x[$b2],$t1)",
  808. "&psrld ($t0,25)",
  809. "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
  810. "&por (@x[$b3],$t0)"
  811. );
  812. }
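# Layout used by the 4x path below (an explanatory sketch): every %xmm
# register holds the same state word from four independent 64-byte blocks,
# one block per 32-bit lane, e.g.
#
#	$xa0 = { word0(block0), word0(block1), word0(block2), word0(block3) }
#
# so a single SIMD instruction advances all four blocks at once. Key and
# nonce words are broadcast across lanes with pshufd 0x00/0x55/0xaa/0xff
# ("smash key by lanes"), and only the counter lane differs, .Linc adding
# 0,1,2,3 to $xd0.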
  813. my $xframe = $win64 ? 0xa8 : 8;
  814. $code.=<<___;
  815. .type ChaCha20_4x,\@function,5
  816. .align 32
  817. ChaCha20_4x:
  818. .cfi_startproc
  819. .LChaCha20_4x:
  820. mov %rsp,%r9 # frame pointer
  821. .cfi_def_cfa_register %r9
  822. mov %r10,%r11
  823. ___
  824. $code.=<<___ if ($avx>1);
  825. shr \$32,%r10 # OPENSSL_ia32cap_P+8
  826. test \$`1<<5`,%r10 # test AVX2
  827. jnz .LChaCha20_8x
  828. ___
  829. $code.=<<___;
  830. cmp \$192,$len
  831. ja .Lproceed4x
  832. and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
  833. cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
  834. je .Ldo_sse3_after_all # to detect Atom
  835. .Lproceed4x:
  836. sub \$0x140+$xframe,%rsp
  837. ___
  838. ################ stack layout
  839. # +0x00 SIMD equivalent of @x[8-12]
  840. # ...
  841. # +0x40 constant copy of key[0-2] smashed by lanes
  842. # ...
  843. # +0x100 SIMD counters (with nonce smashed by lanes)
  844. # ...
  845. # +0x140
  846. $code.=<<___ if ($win64);
  847. movaps %xmm6,-0xa8(%r9)
  848. movaps %xmm7,-0x98(%r9)
  849. movaps %xmm8,-0x88(%r9)
  850. movaps %xmm9,-0x78(%r9)
  851. movaps %xmm10,-0x68(%r9)
  852. movaps %xmm11,-0x58(%r9)
  853. movaps %xmm12,-0x48(%r9)
  854. movaps %xmm13,-0x38(%r9)
  855. movaps %xmm14,-0x28(%r9)
  856. movaps %xmm15,-0x18(%r9)
  857. .L4x_body:
  858. ___
  859. $code.=<<___;
  860. movdqa .Lsigma(%rip),$xa3 # key[0]
  861. movdqu ($key),$xb3 # key[1]
  862. movdqu 16($key),$xt3 # key[2]
  863. movdqu ($counter),$xd3 # key[3]
  864. lea 0x100(%rsp),%rcx # size optimization
  865. lea .Lrot16(%rip),%r10
  866. lea .Lrot24(%rip),%r11
  867. pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  868. pshufd \$0x55,$xa3,$xa1
  869. movdqa $xa0,0x40(%rsp) # ... and offload
  870. pshufd \$0xaa,$xa3,$xa2
  871. movdqa $xa1,0x50(%rsp)
  872. pshufd \$0xff,$xa3,$xa3
  873. movdqa $xa2,0x60(%rsp)
  874. movdqa $xa3,0x70(%rsp)
  875. pshufd \$0x00,$xb3,$xb0
  876. pshufd \$0x55,$xb3,$xb1
  877. movdqa $xb0,0x80-0x100(%rcx)
  878. pshufd \$0xaa,$xb3,$xb2
  879. movdqa $xb1,0x90-0x100(%rcx)
  880. pshufd \$0xff,$xb3,$xb3
  881. movdqa $xb2,0xa0-0x100(%rcx)
  882. movdqa $xb3,0xb0-0x100(%rcx)
  883. pshufd \$0x00,$xt3,$xt0 # "$xc0"
  884. pshufd \$0x55,$xt3,$xt1 # "$xc1"
  885. movdqa $xt0,0xc0-0x100(%rcx)
  886. pshufd \$0xaa,$xt3,$xt2 # "$xc2"
  887. movdqa $xt1,0xd0-0x100(%rcx)
  888. pshufd \$0xff,$xt3,$xt3 # "$xc3"
  889. movdqa $xt2,0xe0-0x100(%rcx)
  890. movdqa $xt3,0xf0-0x100(%rcx)
  891. pshufd \$0x00,$xd3,$xd0
  892. pshufd \$0x55,$xd3,$xd1
  893. paddd .Linc(%rip),$xd0 # don't save counters yet
  894. pshufd \$0xaa,$xd3,$xd2
  895. movdqa $xd1,0x110-0x100(%rcx)
  896. pshufd \$0xff,$xd3,$xd3
  897. movdqa $xd2,0x120-0x100(%rcx)
  898. movdqa $xd3,0x130-0x100(%rcx)
  899. jmp .Loop_enter4x
  900. .align 32
  901. .Loop_outer4x:
  902. movdqa 0x40(%rsp),$xa0 # re-load smashed key
  903. movdqa 0x50(%rsp),$xa1
  904. movdqa 0x60(%rsp),$xa2
  905. movdqa 0x70(%rsp),$xa3
  906. movdqa 0x80-0x100(%rcx),$xb0
  907. movdqa 0x90-0x100(%rcx),$xb1
  908. movdqa 0xa0-0x100(%rcx),$xb2
  909. movdqa 0xb0-0x100(%rcx),$xb3
  910. movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  911. movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  912. movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  913. movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  914. movdqa 0x100-0x100(%rcx),$xd0
  915. movdqa 0x110-0x100(%rcx),$xd1
  916. movdqa 0x120-0x100(%rcx),$xd2
  917. movdqa 0x130-0x100(%rcx),$xd3
  918. paddd .Lfour(%rip),$xd0 # next SIMD counters
  919. .Loop_enter4x:
  920. movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
  921. movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
  922. movdqa (%r10),$xt3 # .Lrot16(%rip)
  923. mov \$10,%eax
  924. movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  925. jmp .Loop4x
  926. .align 32
  927. .Loop4x:
  928. ___
  929. foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
  930. foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
  931. $code.=<<___;
  932. dec %eax
  933. jnz .Loop4x
  934. paddd 0x40(%rsp),$xa0 # accumulate key material
  935. paddd 0x50(%rsp),$xa1
  936. paddd 0x60(%rsp),$xa2
  937. paddd 0x70(%rsp),$xa3
  938. movdqa $xa0,$xt2 # "de-interlace" data
  939. punpckldq $xa1,$xa0
  940. movdqa $xa2,$xt3
  941. punpckldq $xa3,$xa2
  942. punpckhdq $xa1,$xt2
  943. punpckhdq $xa3,$xt3
  944. movdqa $xa0,$xa1
  945. punpcklqdq $xa2,$xa0 # "a0"
  946. movdqa $xt2,$xa3
  947. punpcklqdq $xt3,$xt2 # "a2"
  948. punpckhqdq $xa2,$xa1 # "a1"
  949. punpckhqdq $xt3,$xa3 # "a3"
  950. ___
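# The punpckl/hdq + punpckl/hqdq sequence above is a 4x4 32-bit transpose:
# registers enter holding one state word across four blocks and leave
# holding four consecutive words of a single block,
#
#	{ w0 across blk0..3 }      { blk0: w0 w1 w2 w3 }
#	{ w1 across blk0..3 }  ->  { blk1: w0 w1 w2 w3 }
#	{ w2 across blk0..3 }      { blk2: w0 w1 w2 w3 }
#	{ w3 across blk0..3 }      { blk3: w0 w1 w2 w3 }
#
# so each 16-byte result can be XORed against a contiguous 16-byte piece
# of the input stream.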
  951. ($xa2,$xt2)=($xt2,$xa2);
  952. $code.=<<___;
  953. paddd 0x80-0x100(%rcx),$xb0
  954. paddd 0x90-0x100(%rcx),$xb1
  955. paddd 0xa0-0x100(%rcx),$xb2
  956. paddd 0xb0-0x100(%rcx),$xb3
  957. movdqa $xa0,0x00(%rsp) # offload $xaN
  958. movdqa $xa1,0x10(%rsp)
  959. movdqa 0x20(%rsp),$xa0 # "xc2"
  960. movdqa 0x30(%rsp),$xa1 # "xc3"
  961. movdqa $xb0,$xt2
  962. punpckldq $xb1,$xb0
  963. movdqa $xb2,$xt3
  964. punpckldq $xb3,$xb2
  965. punpckhdq $xb1,$xt2
  966. punpckhdq $xb3,$xt3
  967. movdqa $xb0,$xb1
  968. punpcklqdq $xb2,$xb0 # "b0"
  969. movdqa $xt2,$xb3
  970. punpcklqdq $xt3,$xt2 # "b2"
  971. punpckhqdq $xb2,$xb1 # "b1"
  972. punpckhqdq $xt3,$xb3 # "b3"
  973. ___
  974. ($xb2,$xt2)=($xt2,$xb2);
  975. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  976. $code.=<<___;
  977. paddd 0xc0-0x100(%rcx),$xc0
  978. paddd 0xd0-0x100(%rcx),$xc1
  979. paddd 0xe0-0x100(%rcx),$xc2
  980. paddd 0xf0-0x100(%rcx),$xc3
  981. movdqa $xa2,0x20(%rsp) # keep offloading $xaN
  982. movdqa $xa3,0x30(%rsp)
  983. movdqa $xc0,$xt2
  984. punpckldq $xc1,$xc0
  985. movdqa $xc2,$xt3
  986. punpckldq $xc3,$xc2
  987. punpckhdq $xc1,$xt2
  988. punpckhdq $xc3,$xt3
  989. movdqa $xc0,$xc1
  990. punpcklqdq $xc2,$xc0 # "c0"
  991. movdqa $xt2,$xc3
  992. punpcklqdq $xt3,$xt2 # "c2"
  993. punpckhqdq $xc2,$xc1 # "c1"
  994. punpckhqdq $xt3,$xc3 # "c3"
  995. ___
  996. ($xc2,$xt2)=($xt2,$xc2);
  997. ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
  998. $code.=<<___;
  999. paddd 0x100-0x100(%rcx),$xd0
  1000. paddd 0x110-0x100(%rcx),$xd1
  1001. paddd 0x120-0x100(%rcx),$xd2
  1002. paddd 0x130-0x100(%rcx),$xd3
  1003. movdqa $xd0,$xt2
  1004. punpckldq $xd1,$xd0
  1005. movdqa $xd2,$xt3
  1006. punpckldq $xd3,$xd2
  1007. punpckhdq $xd1,$xt2
  1008. punpckhdq $xd3,$xt3
  1009. movdqa $xd0,$xd1
  1010. punpcklqdq $xd2,$xd0 # "d0"
  1011. movdqa $xt2,$xd3
  1012. punpcklqdq $xt3,$xt2 # "d2"
  1013. punpckhqdq $xd2,$xd1 # "d1"
  1014. punpckhqdq $xt3,$xd3 # "d3"
  1015. ___
  1016. ($xd2,$xt2)=($xt2,$xd2);
  1017. $code.=<<___;
  1018. cmp \$64*4,$len
  1019. jb .Ltail4x
  1020. movdqu 0x00($inp),$xt0 # xor with input
  1021. movdqu 0x10($inp),$xt1
  1022. movdqu 0x20($inp),$xt2
  1023. movdqu 0x30($inp),$xt3
  1024. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1025. pxor $xb0,$xt1
  1026. pxor $xc0,$xt2
  1027. pxor $xd0,$xt3
  1028. movdqu $xt0,0x00($out)
  1029. movdqu 0x40($inp),$xt0
  1030. movdqu $xt1,0x10($out)
  1031. movdqu 0x50($inp),$xt1
  1032. movdqu $xt2,0x20($out)
  1033. movdqu 0x60($inp),$xt2
  1034. movdqu $xt3,0x30($out)
  1035. movdqu 0x70($inp),$xt3
  1036. lea 0x80($inp),$inp # size optimization
  1037. pxor 0x10(%rsp),$xt0
  1038. pxor $xb1,$xt1
  1039. pxor $xc1,$xt2
  1040. pxor $xd1,$xt3
  1041. movdqu $xt0,0x40($out)
  1042. movdqu 0x00($inp),$xt0
  1043. movdqu $xt1,0x50($out)
  1044. movdqu 0x10($inp),$xt1
  1045. movdqu $xt2,0x60($out)
  1046. movdqu 0x20($inp),$xt2
  1047. movdqu $xt3,0x70($out)
  1048. lea 0x80($out),$out # size optimization
  1049. movdqu 0x30($inp),$xt3
  1050. pxor 0x20(%rsp),$xt0
  1051. pxor $xb2,$xt1
  1052. pxor $xc2,$xt2
  1053. pxor $xd2,$xt3
  1054. movdqu $xt0,0x00($out)
  1055. movdqu 0x40($inp),$xt0
  1056. movdqu $xt1,0x10($out)
  1057. movdqu 0x50($inp),$xt1
  1058. movdqu $xt2,0x20($out)
  1059. movdqu 0x60($inp),$xt2
  1060. movdqu $xt3,0x30($out)
  1061. movdqu 0x70($inp),$xt3
  1062. lea 0x80($inp),$inp # inp+=64*4
  1063. pxor 0x30(%rsp),$xt0
  1064. pxor $xb3,$xt1
  1065. pxor $xc3,$xt2
  1066. pxor $xd3,$xt3
  1067. movdqu $xt0,0x40($out)
  1068. movdqu $xt1,0x50($out)
  1069. movdqu $xt2,0x60($out)
  1070. movdqu $xt3,0x70($out)
  1071. lea 0x80($out),$out # out+=64*4
  1072. sub \$64*4,$len
  1073. jnz .Loop_outer4x
  1074. jmp .Ldone4x
  1075. .Ltail4x:
  1076. cmp \$192,$len
  1077. jae .L192_or_more4x
  1078. cmp \$128,$len
  1079. jae .L128_or_more4x
  1080. cmp \$64,$len
  1081. jae .L64_or_more4x
  1082. #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1083. xor %r10,%r10
  1084. #movdqa $xt0,0x00(%rsp)
  1085. movdqa $xb0,0x10(%rsp)
  1086. movdqa $xc0,0x20(%rsp)
  1087. movdqa $xd0,0x30(%rsp)
  1088. jmp .Loop_tail4x
  1089. .align 32
  1090. .L64_or_more4x:
  1091. movdqu 0x00($inp),$xt0 # xor with input
  1092. movdqu 0x10($inp),$xt1
  1093. movdqu 0x20($inp),$xt2
  1094. movdqu 0x30($inp),$xt3
  1095. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1096. pxor $xb0,$xt1
  1097. pxor $xc0,$xt2
  1098. pxor $xd0,$xt3
  1099. movdqu $xt0,0x00($out)
  1100. movdqu $xt1,0x10($out)
  1101. movdqu $xt2,0x20($out)
  1102. movdqu $xt3,0x30($out)
  1103. je .Ldone4x
  1104. movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
  1105. lea 0x40($inp),$inp # inp+=64*1
  1106. xor %r10,%r10
  1107. movdqa $xt0,0x00(%rsp)
  1108. movdqa $xb1,0x10(%rsp)
  1109. lea 0x40($out),$out # out+=64*1
  1110. movdqa $xc1,0x20(%rsp)
  1111. sub \$64,$len # len-=64*1
  1112. movdqa $xd1,0x30(%rsp)
  1113. jmp .Loop_tail4x
  1114. .align 32
  1115. .L128_or_more4x:
  1116. movdqu 0x00($inp),$xt0 # xor with input
  1117. movdqu 0x10($inp),$xt1
  1118. movdqu 0x20($inp),$xt2
  1119. movdqu 0x30($inp),$xt3
  1120. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1121. pxor $xb0,$xt1
  1122. pxor $xc0,$xt2
  1123. pxor $xd0,$xt3
  1124. movdqu $xt0,0x00($out)
  1125. movdqu 0x40($inp),$xt0
  1126. movdqu $xt1,0x10($out)
  1127. movdqu 0x50($inp),$xt1
  1128. movdqu $xt2,0x20($out)
  1129. movdqu 0x60($inp),$xt2
  1130. movdqu $xt3,0x30($out)
  1131. movdqu 0x70($inp),$xt3
  1132. pxor 0x10(%rsp),$xt0
  1133. pxor $xb1,$xt1
  1134. pxor $xc1,$xt2
  1135. pxor $xd1,$xt3
  1136. movdqu $xt0,0x40($out)
  1137. movdqu $xt1,0x50($out)
  1138. movdqu $xt2,0x60($out)
  1139. movdqu $xt3,0x70($out)
  1140. je .Ldone4x
  1141. movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
  1142. lea 0x80($inp),$inp # inp+=64*2
  1143. xor %r10,%r10
  1144. movdqa $xt0,0x00(%rsp)
  1145. movdqa $xb2,0x10(%rsp)
  1146. lea 0x80($out),$out # out+=64*2
  1147. movdqa $xc2,0x20(%rsp)
  1148. sub \$128,$len # len-=64*2
  1149. movdqa $xd2,0x30(%rsp)
  1150. jmp .Loop_tail4x
  1151. .align 32
  1152. .L192_or_more4x:
  1153. movdqu 0x00($inp),$xt0 # xor with input
  1154. movdqu 0x10($inp),$xt1
  1155. movdqu 0x20($inp),$xt2
  1156. movdqu 0x30($inp),$xt3
  1157. pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
  1158. pxor $xb0,$xt1
  1159. pxor $xc0,$xt2
  1160. pxor $xd0,$xt3
  1161. movdqu $xt0,0x00($out)
  1162. movdqu 0x40($inp),$xt0
  1163. movdqu $xt1,0x10($out)
  1164. movdqu 0x50($inp),$xt1
  1165. movdqu $xt2,0x20($out)
  1166. movdqu 0x60($inp),$xt2
  1167. movdqu $xt3,0x30($out)
  1168. movdqu 0x70($inp),$xt3
  1169. lea 0x80($inp),$inp # size optimization
  1170. pxor 0x10(%rsp),$xt0
  1171. pxor $xb1,$xt1
  1172. pxor $xc1,$xt2
  1173. pxor $xd1,$xt3
  1174. movdqu $xt0,0x40($out)
  1175. movdqu 0x00($inp),$xt0
  1176. movdqu $xt1,0x50($out)
  1177. movdqu 0x10($inp),$xt1
  1178. movdqu $xt2,0x60($out)
  1179. movdqu 0x20($inp),$xt2
  1180. movdqu $xt3,0x70($out)
  1181. lea 0x80($out),$out # size optimization
  1182. movdqu 0x30($inp),$xt3
  1183. pxor 0x20(%rsp),$xt0
  1184. pxor $xb2,$xt1
  1185. pxor $xc2,$xt2
  1186. pxor $xd2,$xt3
  1187. movdqu $xt0,0x00($out)
  1188. movdqu $xt1,0x10($out)
  1189. movdqu $xt2,0x20($out)
  1190. movdqu $xt3,0x30($out)
  1191. je .Ldone4x
  1192. movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
  1193. lea 0x40($inp),$inp # inp+=64*3
  1194. xor %r10,%r10
  1195. movdqa $xt0,0x00(%rsp)
  1196. movdqa $xb3,0x10(%rsp)
  1197. lea 0x40($out),$out # out+=64*3
  1198. movdqa $xc3,0x20(%rsp)
  1199. sub \$192,$len # len-=64*3
  1200. movdqa $xd3,0x30(%rsp)
  1201. .Loop_tail4x:
  1202. movzb ($inp,%r10),%eax
  1203. movzb (%rsp,%r10),%ecx
  1204. lea 1(%r10),%r10
  1205. xor %ecx,%eax
  1206. mov %al,-1($out,%r10)
  1207. dec $len
  1208. jnz .Loop_tail4x
  1209. .Ldone4x:
  1210. ___
  1211. $code.=<<___ if ($win64);
  1212. movaps -0xa8(%r9),%xmm6
  1213. movaps -0x98(%r9),%xmm7
  1214. movaps -0x88(%r9),%xmm8
  1215. movaps -0x78(%r9),%xmm9
  1216. movaps -0x68(%r9),%xmm10
  1217. movaps -0x58(%r9),%xmm11
  1218. movaps -0x48(%r9),%xmm12
  1219. movaps -0x38(%r9),%xmm13
  1220. movaps -0x28(%r9),%xmm14
  1221. movaps -0x18(%r9),%xmm15
  1222. ___
  1223. $code.=<<___;
  1224. lea (%r9),%rsp
  1225. .cfi_def_cfa_register %rsp
  1226. .L4x_epilogue:
  1227. ret
  1228. .cfi_endproc
  1229. .size ChaCha20_4x,.-ChaCha20_4x
  1230. ___
  1231. }
  1232. ########################################################################
  1233. # XOP code path that handles all lengths.
  1234. if ($avx) {
  1235. # There is some "anomaly" observed depending on instructions' size or
  1236. # alignment. If you look closely at the code below you'll notice that
  1237. # sometimes argument order varies. The order affects instruction
  1238. # encoding by making it larger, and such fiddling gives 5% performance
  1239. # improvement. This is on FX-4100...
  1240. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1241. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
  1242. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1243. $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
  1244. sub XOP_lane_ROUND {
  1245. my ($a0,$b0,$c0,$d0)=@_;
  1246. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1247. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1248. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1249. my @x=map("\"$_\"",@xx);
  1250. (
  1251. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1252. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1253. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1254. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1255. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1256. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1257. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1258. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1259. "&vprotd (@x[$d0],@x[$d0],16)",
  1260. "&vprotd (@x[$d1],@x[$d1],16)",
  1261. "&vprotd (@x[$d2],@x[$d2],16)",
  1262. "&vprotd (@x[$d3],@x[$d3],16)",
  1263. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1264. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1265. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1266. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1267. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1268. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1269. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1270. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1271. "&vprotd (@x[$b0],@x[$b0],12)",
  1272. "&vprotd (@x[$b1],@x[$b1],12)",
  1273. "&vprotd (@x[$b2],@x[$b2],12)",
  1274. "&vprotd (@x[$b3],@x[$b3],12)",
  1275. "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
  1276. "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
  1277. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1278. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1279. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1280. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1281. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1282. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1283. "&vprotd (@x[$d0],@x[$d0],8)",
  1284. "&vprotd (@x[$d1],@x[$d1],8)",
  1285. "&vprotd (@x[$d2],@x[$d2],8)",
  1286. "&vprotd (@x[$d3],@x[$d3],8)",
  1287. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  1288. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  1289. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  1290. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  1291. "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
  1292. "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
  1293. "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
  1294. "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
  1295. "&vprotd (@x[$b0],@x[$b0],7)",
  1296. "&vprotd (@x[$b1],@x[$b1],7)",
  1297. "&vprotd (@x[$b2],@x[$b2],7)",
  1298. "&vprotd (@x[$b3],@x[$b3],7)"
  1299. );
  1300. }
  1301. my $xframe = $win64 ? 0xa8 : 8;
  1302. $code.=<<___;
  1303. .type ChaCha20_4xop,\@function,5
  1304. .align 32
  1305. ChaCha20_4xop:
  1306. .cfi_startproc
  1307. .LChaCha20_4xop:
  1308. mov %rsp,%r9 # frame pointer
  1309. .cfi_def_cfa_register %r9
  1310. sub \$0x140+$xframe,%rsp
  1311. ___
  1312. ################ stack layout
  1313. # +0x00 SIMD equivalent of @x[8-12]
  1314. # ...
  1315. # +0x40 constant copy of key[0-2] smashed by lanes
  1316. # ...
  1317. # +0x100 SIMD counters (with nonce smashed by lanes)
  1318. # ...
  1319. # +0x140
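# Note the "size optimization" below: %rcx is biased to %rsp+0x100 so that
# slots in the 0x80..0x17f region can be reached with a one-byte displacement
# (and without the SIB byte that %rsp-based addressing needs). A worked
# example (assumption: explanatory only):
#
#	vmovdqa	%xmm1,0x80(%rsp)		# 4-byte displacement + SIB
#	vmovdqa	%xmm1,0x80-0x100(%rcx)		# same slot, 1-byte displacement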
  1320. $code.=<<___ if ($win64);
  1321. movaps %xmm6,-0xa8(%r9)
  1322. movaps %xmm7,-0x98(%r9)
  1323. movaps %xmm8,-0x88(%r9)
  1324. movaps %xmm9,-0x78(%r9)
  1325. movaps %xmm10,-0x68(%r9)
  1326. movaps %xmm11,-0x58(%r9)
  1327. movaps %xmm12,-0x48(%r9)
  1328. movaps %xmm13,-0x38(%r9)
  1329. movaps %xmm14,-0x28(%r9)
  1330. movaps %xmm15,-0x18(%r9)
  1331. .L4xop_body:
  1332. ___
  1333. $code.=<<___;
  1334. vzeroupper
  1335. vmovdqa .Lsigma(%rip),$xa3 # key[0]
  1336. vmovdqu ($key),$xb3 # key[1]
  1337. vmovdqu 16($key),$xt3 # key[2]
  1338. vmovdqu ($counter),$xd3 # key[3]
  1339. lea 0x100(%rsp),%rcx # size optimization
  1340. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1341. vpshufd \$0x55,$xa3,$xa1
  1342. vmovdqa $xa0,0x40(%rsp) # ... and offload
  1343. vpshufd \$0xaa,$xa3,$xa2
  1344. vmovdqa $xa1,0x50(%rsp)
  1345. vpshufd \$0xff,$xa3,$xa3
  1346. vmovdqa $xa2,0x60(%rsp)
  1347. vmovdqa $xa3,0x70(%rsp)
  1348. vpshufd \$0x00,$xb3,$xb0
  1349. vpshufd \$0x55,$xb3,$xb1
  1350. vmovdqa $xb0,0x80-0x100(%rcx)
  1351. vpshufd \$0xaa,$xb3,$xb2
  1352. vmovdqa $xb1,0x90-0x100(%rcx)
  1353. vpshufd \$0xff,$xb3,$xb3
  1354. vmovdqa $xb2,0xa0-0x100(%rcx)
  1355. vmovdqa $xb3,0xb0-0x100(%rcx)
  1356. vpshufd \$0x00,$xt3,$xt0 # "$xc0"
  1357. vpshufd \$0x55,$xt3,$xt1 # "$xc1"
  1358. vmovdqa $xt0,0xc0-0x100(%rcx)
  1359. vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
  1360. vmovdqa $xt1,0xd0-0x100(%rcx)
  1361. vpshufd \$0xff,$xt3,$xt3 # "$xc3"
  1362. vmovdqa $xt2,0xe0-0x100(%rcx)
  1363. vmovdqa $xt3,0xf0-0x100(%rcx)
  1364. vpshufd \$0x00,$xd3,$xd0
  1365. vpshufd \$0x55,$xd3,$xd1
  1366. vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
  1367. vpshufd \$0xaa,$xd3,$xd2
  1368. vmovdqa $xd1,0x110-0x100(%rcx)
  1369. vpshufd \$0xff,$xd3,$xd3
  1370. vmovdqa $xd2,0x120-0x100(%rcx)
  1371. vmovdqa $xd3,0x130-0x100(%rcx)
  1372. jmp .Loop_enter4xop
  1373. .align 32
  1374. .Loop_outer4xop:
  1375. vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
  1376. vmovdqa 0x50(%rsp),$xa1
  1377. vmovdqa 0x60(%rsp),$xa2
  1378. vmovdqa 0x70(%rsp),$xa3
  1379. vmovdqa 0x80-0x100(%rcx),$xb0
  1380. vmovdqa 0x90-0x100(%rcx),$xb1
  1381. vmovdqa 0xa0-0x100(%rcx),$xb2
  1382. vmovdqa 0xb0-0x100(%rcx),$xb3
  1383. vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
  1384. vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
  1385. vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
  1386. vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
  1387. vmovdqa 0x100-0x100(%rcx),$xd0
  1388. vmovdqa 0x110-0x100(%rcx),$xd1
  1389. vmovdqa 0x120-0x100(%rcx),$xd2
  1390. vmovdqa 0x130-0x100(%rcx),$xd3
  1391. vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
  1392. .Loop_enter4xop:
  1393. mov \$10,%eax
  1394. vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
  1395. jmp .Loop4xop
  1396. .align 32
  1397. .Loop4xop:
  1398. ___
  1399. foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
  1400. foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
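# (0,4,8,12) generates the four column quarter-rounds and (0,5,10,15) the four
# diagonal quarter-rounds; together they form one ChaCha double round, and the
# surrounding .Loop4xop runs the pair ten times for the full 20 rounds.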
  1401. $code.=<<___;
  1402. dec %eax
  1403. jnz .Loop4xop
  1404. vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
  1405. vpaddd 0x50(%rsp),$xa1,$xa1
  1406. vpaddd 0x60(%rsp),$xa2,$xa2
  1407. vpaddd 0x70(%rsp),$xa3,$xa3
  1408. vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
  1409. vmovdqa $xt3,0x30(%rsp)
  1410. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1411. vpunpckldq $xa3,$xa2,$xt3
  1412. vpunpckhdq $xa1,$xa0,$xa0
  1413. vpunpckhdq $xa3,$xa2,$xa2
  1414. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1415. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1416. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1417. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1418. ___
  1419. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  1420. $code.=<<___;
  1421. vpaddd 0x80-0x100(%rcx),$xb0,$xb0
  1422. vpaddd 0x90-0x100(%rcx),$xb1,$xb1
  1423. vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
  1424. vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
  1425. vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
  1426. vmovdqa $xa1,0x10(%rsp)
  1427. vmovdqa 0x20(%rsp),$xa0 # "xc2"
  1428. vmovdqa 0x30(%rsp),$xa1 # "xc3"
  1429. vpunpckldq $xb1,$xb0,$xt2
  1430. vpunpckldq $xb3,$xb2,$xt3
  1431. vpunpckhdq $xb1,$xb0,$xb0
  1432. vpunpckhdq $xb3,$xb2,$xb2
  1433. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1434. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1435. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1436. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1437. ___
  1438. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1439. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  1440. $code.=<<___;
  1441. vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
  1442. vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
  1443. vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
  1444. vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
  1445. vpunpckldq $xc1,$xc0,$xt2
  1446. vpunpckldq $xc3,$xc2,$xt3
  1447. vpunpckhdq $xc1,$xc0,$xc0
  1448. vpunpckhdq $xc3,$xc2,$xc2
  1449. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1450. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1451. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1452. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1453. ___
  1454. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1455. $code.=<<___;
  1456. vpaddd 0x100-0x100(%rcx),$xd0,$xd0
  1457. vpaddd 0x110-0x100(%rcx),$xd1,$xd1
  1458. vpaddd 0x120-0x100(%rcx),$xd2,$xd2
  1459. vpaddd 0x130-0x100(%rcx),$xd3,$xd3
  1460. vpunpckldq $xd1,$xd0,$xt2
  1461. vpunpckldq $xd3,$xd2,$xt3
  1462. vpunpckhdq $xd1,$xd0,$xd0
  1463. vpunpckhdq $xd3,$xd2,$xd2
  1464. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1465. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1466. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1467. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1468. ___
  1469. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1470. ($xa0,$xa1)=($xt2,$xt3);
  1471. $code.=<<___;
  1472. vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
  1473. vmovdqa 0x10(%rsp),$xa1
  1474. cmp \$64*4,$len
  1475. jb .Ltail4xop
  1476. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1477. vpxor 0x10($inp),$xb0,$xb0
  1478. vpxor 0x20($inp),$xc0,$xc0
  1479. vpxor 0x30($inp),$xd0,$xd0
  1480. vpxor 0x40($inp),$xa1,$xa1
  1481. vpxor 0x50($inp),$xb1,$xb1
  1482. vpxor 0x60($inp),$xc1,$xc1
  1483. vpxor 0x70($inp),$xd1,$xd1
  1484. lea 0x80($inp),$inp # size optimization
  1485. vpxor 0x00($inp),$xa2,$xa2
  1486. vpxor 0x10($inp),$xb2,$xb2
  1487. vpxor 0x20($inp),$xc2,$xc2
  1488. vpxor 0x30($inp),$xd2,$xd2
  1489. vpxor 0x40($inp),$xa3,$xa3
  1490. vpxor 0x50($inp),$xb3,$xb3
  1491. vpxor 0x60($inp),$xc3,$xc3
  1492. vpxor 0x70($inp),$xd3,$xd3
  1493. lea 0x80($inp),$inp # inp+=64*4
  1494. vmovdqu $xa0,0x00($out)
  1495. vmovdqu $xb0,0x10($out)
  1496. vmovdqu $xc0,0x20($out)
  1497. vmovdqu $xd0,0x30($out)
  1498. vmovdqu $xa1,0x40($out)
  1499. vmovdqu $xb1,0x50($out)
  1500. vmovdqu $xc1,0x60($out)
  1501. vmovdqu $xd1,0x70($out)
  1502. lea 0x80($out),$out # size optimization
  1503. vmovdqu $xa2,0x00($out)
  1504. vmovdqu $xb2,0x10($out)
  1505. vmovdqu $xc2,0x20($out)
  1506. vmovdqu $xd2,0x30($out)
  1507. vmovdqu $xa3,0x40($out)
  1508. vmovdqu $xb3,0x50($out)
  1509. vmovdqu $xc3,0x60($out)
  1510. vmovdqu $xd3,0x70($out)
  1511. lea 0x80($out),$out # out+=64*4
  1512. sub \$64*4,$len
  1513. jnz .Loop_outer4xop
  1514. jmp .Ldone4xop
  1515. .align 32
  1516. .Ltail4xop:
  1517. cmp \$192,$len
  1518. jae .L192_or_more4xop
  1519. cmp \$128,$len
  1520. jae .L128_or_more4xop
  1521. cmp \$64,$len
  1522. jae .L64_or_more4xop
  1523. xor %r10,%r10
  1524. vmovdqa $xa0,0x00(%rsp)
  1525. vmovdqa $xb0,0x10(%rsp)
  1526. vmovdqa $xc0,0x20(%rsp)
  1527. vmovdqa $xd0,0x30(%rsp)
  1528. jmp .Loop_tail4xop
  1529. .align 32
  1530. .L64_or_more4xop:
  1531. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1532. vpxor 0x10($inp),$xb0,$xb0
  1533. vpxor 0x20($inp),$xc0,$xc0
  1534. vpxor 0x30($inp),$xd0,$xd0
  1535. vmovdqu $xa0,0x00($out)
  1536. vmovdqu $xb0,0x10($out)
  1537. vmovdqu $xc0,0x20($out)
  1538. vmovdqu $xd0,0x30($out)
  1539. je .Ldone4xop
  1540. lea 0x40($inp),$inp # inp+=64*1
  1541. vmovdqa $xa1,0x00(%rsp)
  1542. xor %r10,%r10
  1543. vmovdqa $xb1,0x10(%rsp)
  1544. lea 0x40($out),$out # out+=64*1
  1545. vmovdqa $xc1,0x20(%rsp)
  1546. sub \$64,$len # len-=64*1
  1547. vmovdqa $xd1,0x30(%rsp)
  1548. jmp .Loop_tail4xop
  1549. .align 32
  1550. .L128_or_more4xop:
  1551. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1552. vpxor 0x10($inp),$xb0,$xb0
  1553. vpxor 0x20($inp),$xc0,$xc0
  1554. vpxor 0x30($inp),$xd0,$xd0
  1555. vpxor 0x40($inp),$xa1,$xa1
  1556. vpxor 0x50($inp),$xb1,$xb1
  1557. vpxor 0x60($inp),$xc1,$xc1
  1558. vpxor 0x70($inp),$xd1,$xd1
  1559. vmovdqu $xa0,0x00($out)
  1560. vmovdqu $xb0,0x10($out)
  1561. vmovdqu $xc0,0x20($out)
  1562. vmovdqu $xd0,0x30($out)
  1563. vmovdqu $xa1,0x40($out)
  1564. vmovdqu $xb1,0x50($out)
  1565. vmovdqu $xc1,0x60($out)
  1566. vmovdqu $xd1,0x70($out)
  1567. je .Ldone4xop
  1568. lea 0x80($inp),$inp # inp+=64*2
  1569. vmovdqa $xa2,0x00(%rsp)
  1570. xor %r10,%r10
  1571. vmovdqa $xb2,0x10(%rsp)
  1572. lea 0x80($out),$out # out+=64*2
  1573. vmovdqa $xc2,0x20(%rsp)
  1574. sub \$128,$len # len-=64*2
  1575. vmovdqa $xd2,0x30(%rsp)
  1576. jmp .Loop_tail4xop
  1577. .align 32
  1578. .L192_or_more4xop:
  1579. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1580. vpxor 0x10($inp),$xb0,$xb0
  1581. vpxor 0x20($inp),$xc0,$xc0
  1582. vpxor 0x30($inp),$xd0,$xd0
  1583. vpxor 0x40($inp),$xa1,$xa1
  1584. vpxor 0x50($inp),$xb1,$xb1
  1585. vpxor 0x60($inp),$xc1,$xc1
  1586. vpxor 0x70($inp),$xd1,$xd1
  1587. lea 0x80($inp),$inp # size optimization
  1588. vpxor 0x00($inp),$xa2,$xa2
  1589. vpxor 0x10($inp),$xb2,$xb2
  1590. vpxor 0x20($inp),$xc2,$xc2
  1591. vpxor 0x30($inp),$xd2,$xd2
  1592. vmovdqu $xa0,0x00($out)
  1593. vmovdqu $xb0,0x10($out)
  1594. vmovdqu $xc0,0x20($out)
  1595. vmovdqu $xd0,0x30($out)
  1596. vmovdqu $xa1,0x40($out)
  1597. vmovdqu $xb1,0x50($out)
  1598. vmovdqu $xc1,0x60($out)
  1599. vmovdqu $xd1,0x70($out)
  1600. lea 0x80($out),$out # size optimization
  1601. vmovdqu $xa2,0x00($out)
  1602. vmovdqu $xb2,0x10($out)
  1603. vmovdqu $xc2,0x20($out)
  1604. vmovdqu $xd2,0x30($out)
  1605. je .Ldone4xop
  1606. lea 0x40($inp),$inp # inp+=64*3
  1607. vmovdqa $xa3,0x00(%rsp)
  1608. xor %r10,%r10
  1609. vmovdqa $xb3,0x10(%rsp)
  1610. lea 0x40($out),$out # out+=64*3
  1611. vmovdqa $xc3,0x20(%rsp)
  1612. sub \$192,$len # len-=64*3
  1613. vmovdqa $xd3,0x30(%rsp)
  1614. .Loop_tail4xop:
  1615. movzb ($inp,%r10),%eax
  1616. movzb (%rsp,%r10),%ecx
  1617. lea 1(%r10),%r10
  1618. xor %ecx,%eax
  1619. mov %al,-1($out,%r10)
  1620. dec $len
  1621. jnz .Loop_tail4xop
  1622. .Ldone4xop:
  1623. vzeroupper
  1624. ___
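# .Loop_tail4xop above handles a final partial block: the next unused 64-byte
# keystream block has been stashed at 0x00(%rsp), and the loop XORs it into
# the input one byte at a time. A reference sketch in Perl (assumption:
# illustrative only; $in, $keystream and $n are hypothetical byte-string and
# length variables, not the register aliases used elsewhere in this file):
#
#	my $outbuf = '';
#	for my $i (0 .. $n - 1) {		# $n = leftover bytes, 1..63
#		$outbuf .= chr( ord(substr($in, $i, 1))
#			      ^ ord(substr($keystream, $i, 1)) );
#	}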
  1625. $code.=<<___ if ($win64);
  1626. movaps -0xa8(%r9),%xmm6
  1627. movaps -0x98(%r9),%xmm7
  1628. movaps -0x88(%r9),%xmm8
  1629. movaps -0x78(%r9),%xmm9
  1630. movaps -0x68(%r9),%xmm10
  1631. movaps -0x58(%r9),%xmm11
  1632. movaps -0x48(%r9),%xmm12
  1633. movaps -0x38(%r9),%xmm13
  1634. movaps -0x28(%r9),%xmm14
  1635. movaps -0x18(%r9),%xmm15
  1636. ___
  1637. $code.=<<___;
  1638. lea (%r9),%rsp
  1639. .cfi_def_cfa_register %rsp
  1640. .L4xop_epilogue:
  1641. ret
  1642. .cfi_endproc
  1643. .size ChaCha20_4xop,.-ChaCha20_4xop
  1644. ___
  1645. }
  1646. ########################################################################
  1647. # AVX2 code path
  1648. if ($avx>1) {
  1649. my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
  1650. $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
  1651. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  1652. "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
  1653. sub AVX2_lane_ROUND {
  1654. my ($a0,$b0,$c0,$d0)=@_;
  1655. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  1656. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  1657. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  1658. my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
  1659. my @x=map("\"$_\"",@xx);
  1660. # Consider order in which variables are addressed by their
  1661. # index:
  1662. #
  1663. # a b c d
  1664. #
  1665. # 0 4 8 12 < even round
  1666. # 1 5 9 13
  1667. # 2 6 10 14
  1668. # 3 7 11 15
  1669. # 0 5 10 15 < odd round
  1670. # 1 6 11 12
  1671. # 2 7 8 13
  1672. # 3 4 9 14
  1673. #
1674. # 'a', 'b' and 'd's are permanently allocated in registers,
1675. # @x[0..7,12..15], while the 'c's are maintained in memory. If
1676. # you observe the 'c' column, you'll notice that a pair of 'c's is
1677. # invariant between rounds. This means that we have to reload
1678. # them only once per round, in the middle. This is why you'll see
1679. # a bunch of 'c' stores and loads in the middle, but none at
1680. # the beginning or end.
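# A small worked example of the slot arithmetic used below (assumption:
# illustrative comment only): 'c' row N lives at 32*(N-8)(%rsp), i.e.
#
#	@x[8]  -> 0x00(%rsp)	@x[10] -> 0x40(%rsp)
#	@x[9]  -> 0x20(%rsp)	@x[11] -> 0x60(%rsp)
#
# so the even round ($c0..$c3 = 8,9,10,11) stores the freshly updated pair to
# 0x00/0x20 and loads 0x40/0x60 in the middle, while the odd round
# ($c0..$c3 = 10,11,8,9) does the opposite.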
  1681. (
  1682. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  1683. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1684. "&vpshufb (@x[$d0],@x[$d0],$t1)",
  1685. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  1686. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1687. "&vpshufb (@x[$d1],@x[$d1],$t1)",
  1688. "&vpaddd ($xc,$xc,@x[$d0])",
  1689. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1690. "&vpslld ($t0,@x[$b0],12)",
  1691. "&vpsrld (@x[$b0],@x[$b0],20)",
  1692. "&vpor (@x[$b0],$t0,@x[$b0])",
  1693. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1694. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1695. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1696. "&vpslld ($t1,@x[$b1],12)",
  1697. "&vpsrld (@x[$b1],@x[$b1],20)",
  1698. "&vpor (@x[$b1],$t1,@x[$b1])",
  1699. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  1700. "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
  1701. "&vpshufb (@x[$d0],@x[$d0],$t0)",
  1702. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  1703. "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
  1704. "&vpshufb (@x[$d1],@x[$d1],$t0)",
  1705. "&vpaddd ($xc,$xc,@x[$d0])",
  1706. "&vpxor (@x[$b0],$xc,@x[$b0])",
  1707. "&vpslld ($t1,@x[$b0],7)",
  1708. "&vpsrld (@x[$b0],@x[$b0],25)",
  1709. "&vpor (@x[$b0],$t1,@x[$b0])",
  1710. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1711. "&vpaddd ($xc_,$xc_,@x[$d1])",
  1712. "&vpxor (@x[$b1],$xc_,@x[$b1])",
  1713. "&vpslld ($t0,@x[$b1],7)",
  1714. "&vpsrld (@x[$b1],@x[$b1],25)",
  1715. "&vpor (@x[$b1],$t0,@x[$b1])",
  1716. "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
  1717. "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
  1718. "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
  1719. "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
  1720. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  1721. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1722. "&vpshufb (@x[$d2],@x[$d2],$t1)",
  1723. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  1724. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1725. "&vpshufb (@x[$d3],@x[$d3],$t1)",
  1726. "&vpaddd ($xc,$xc,@x[$d2])",
  1727. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1728. "&vpslld ($t0,@x[$b2],12)",
  1729. "&vpsrld (@x[$b2],@x[$b2],20)",
  1730. "&vpor (@x[$b2],$t0,@x[$b2])",
  1731. "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
  1732. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1733. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1734. "&vpslld ($t1,@x[$b3],12)",
  1735. "&vpsrld (@x[$b3],@x[$b3],20)",
  1736. "&vpor (@x[$b3],$t1,@x[$b3])",
  1737. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  1738. "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
  1739. "&vpshufb (@x[$d2],@x[$d2],$t0)",
  1740. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  1741. "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
  1742. "&vpshufb (@x[$d3],@x[$d3],$t0)",
  1743. "&vpaddd ($xc,$xc,@x[$d2])",
  1744. "&vpxor (@x[$b2],$xc,@x[$b2])",
  1745. "&vpslld ($t1,@x[$b2],7)",
  1746. "&vpsrld (@x[$b2],@x[$b2],25)",
  1747. "&vpor (@x[$b2],$t1,@x[$b2])",
  1748. "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
  1749. "&vpaddd ($xc_,$xc_,@x[$d3])",
  1750. "&vpxor (@x[$b3],$xc_,@x[$b3])",
  1751. "&vpslld ($t0,@x[$b3],7)",
  1752. "&vpsrld (@x[$b3],@x[$b3],25)",
  1753. "&vpor (@x[$b3],$t0,@x[$b3])"
  1754. );
  1755. }
  1756. my $xframe = $win64 ? 0xa8 : 8;
  1757. $code.=<<___;
  1758. .type ChaCha20_8x,\@function,5
  1759. .align 32
  1760. ChaCha20_8x:
  1761. .cfi_startproc
  1762. .LChaCha20_8x:
  1763. mov %rsp,%r9 # frame register
  1764. .cfi_def_cfa_register %r9
  1765. sub \$0x280+$xframe,%rsp
  1766. and \$-32,%rsp
  1767. ___
  1768. $code.=<<___ if ($win64);
  1769. movaps %xmm6,-0xa8(%r9)
  1770. movaps %xmm7,-0x98(%r9)
  1771. movaps %xmm8,-0x88(%r9)
  1772. movaps %xmm9,-0x78(%r9)
  1773. movaps %xmm10,-0x68(%r9)
  1774. movaps %xmm11,-0x58(%r9)
  1775. movaps %xmm12,-0x48(%r9)
  1776. movaps %xmm13,-0x38(%r9)
  1777. movaps %xmm14,-0x28(%r9)
  1778. movaps %xmm15,-0x18(%r9)
  1779. .L8x_body:
  1780. ___
  1781. $code.=<<___;
  1782. vzeroupper
  1783. ################ stack layout
  1784. # +0x00 SIMD equivalent of @x[8-12]
  1785. # ...
  1786. # +0x80 constant copy of key[0-2] smashed by lanes
  1787. # ...
  1788. # +0x200 SIMD counters (with nonce smashed by lanes)
  1789. # ...
  1790. # +0x280
  1791. vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
  1792. vbroadcasti128 ($key),$xb3 # key[1]
  1793. vbroadcasti128 16($key),$xt3 # key[2]
  1794. vbroadcasti128 ($counter),$xd3 # key[3]
  1795. lea 0x100(%rsp),%rcx # size optimization
  1796. lea 0x200(%rsp),%rax # size optimization
  1797. lea .Lrot16(%rip),%r10
  1798. lea .Lrot24(%rip),%r11
  1799. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  1800. vpshufd \$0x55,$xa3,$xa1
  1801. vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
  1802. vpshufd \$0xaa,$xa3,$xa2
  1803. vmovdqa $xa1,0xa0-0x100(%rcx)
  1804. vpshufd \$0xff,$xa3,$xa3
  1805. vmovdqa $xa2,0xc0-0x100(%rcx)
  1806. vmovdqa $xa3,0xe0-0x100(%rcx)
  1807. vpshufd \$0x00,$xb3,$xb0
  1808. vpshufd \$0x55,$xb3,$xb1
  1809. vmovdqa $xb0,0x100-0x100(%rcx)
  1810. vpshufd \$0xaa,$xb3,$xb2
  1811. vmovdqa $xb1,0x120-0x100(%rcx)
  1812. vpshufd \$0xff,$xb3,$xb3
  1813. vmovdqa $xb2,0x140-0x100(%rcx)
  1814. vmovdqa $xb3,0x160-0x100(%rcx)
  1815. vpshufd \$0x00,$xt3,$xt0 # "xc0"
  1816. vpshufd \$0x55,$xt3,$xt1 # "xc1"
  1817. vmovdqa $xt0,0x180-0x200(%rax)
  1818. vpshufd \$0xaa,$xt3,$xt2 # "xc2"
  1819. vmovdqa $xt1,0x1a0-0x200(%rax)
  1820. vpshufd \$0xff,$xt3,$xt3 # "xc3"
  1821. vmovdqa $xt2,0x1c0-0x200(%rax)
  1822. vmovdqa $xt3,0x1e0-0x200(%rax)
  1823. vpshufd \$0x00,$xd3,$xd0
  1824. vpshufd \$0x55,$xd3,$xd1
  1825. vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
  1826. vpshufd \$0xaa,$xd3,$xd2
  1827. vmovdqa $xd1,0x220-0x200(%rax)
  1828. vpshufd \$0xff,$xd3,$xd3
  1829. vmovdqa $xd2,0x240-0x200(%rax)
  1830. vmovdqa $xd3,0x260-0x200(%rax)
  1831. jmp .Loop_enter8x
  1832. .align 32
  1833. .Loop_outer8x:
  1834. vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
  1835. vmovdqa 0xa0-0x100(%rcx),$xa1
  1836. vmovdqa 0xc0-0x100(%rcx),$xa2
  1837. vmovdqa 0xe0-0x100(%rcx),$xa3
  1838. vmovdqa 0x100-0x100(%rcx),$xb0
  1839. vmovdqa 0x120-0x100(%rcx),$xb1
  1840. vmovdqa 0x140-0x100(%rcx),$xb2
  1841. vmovdqa 0x160-0x100(%rcx),$xb3
  1842. vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
  1843. vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
  1844. vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
  1845. vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
  1846. vmovdqa 0x200-0x200(%rax),$xd0
  1847. vmovdqa 0x220-0x200(%rax),$xd1
  1848. vmovdqa 0x240-0x200(%rax),$xd2
  1849. vmovdqa 0x260-0x200(%rax),$xd3
  1850. vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
  1851. .Loop_enter8x:
  1852. vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
  1853. vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
  1854. vbroadcasti128 (%r10),$xt3
  1855. vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
  1856. mov \$10,%eax
  1857. jmp .Loop8x
  1858. .align 32
  1859. .Loop8x:
  1860. ___
  1861. foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
  1862. foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
  1863. $code.=<<___;
  1864. dec %eax
  1865. jnz .Loop8x
  1866. lea 0x200(%rsp),%rax # size optimization
  1867. vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
  1868. vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
  1869. vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
  1870. vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
  1871. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  1872. vpunpckldq $xa3,$xa2,$xt3
  1873. vpunpckhdq $xa1,$xa0,$xa0
  1874. vpunpckhdq $xa3,$xa2,$xa2
  1875. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  1876. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  1877. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  1878. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  1879. ___
  1880. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  1881. $code.=<<___;
  1882. vpaddd 0x100-0x100(%rcx),$xb0,$xb0
  1883. vpaddd 0x120-0x100(%rcx),$xb1,$xb1
  1884. vpaddd 0x140-0x100(%rcx),$xb2,$xb2
  1885. vpaddd 0x160-0x100(%rcx),$xb3,$xb3
  1886. vpunpckldq $xb1,$xb0,$xt2
  1887. vpunpckldq $xb3,$xb2,$xt3
  1888. vpunpckhdq $xb1,$xb0,$xb0
  1889. vpunpckhdq $xb3,$xb2,$xb2
  1890. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  1891. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  1892. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  1893. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  1894. ___
  1895. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  1896. $code.=<<___;
  1897. vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
  1898. vperm2i128 \$0x31,$xb0,$xa0,$xb0
  1899. vperm2i128 \$0x20,$xb1,$xa1,$xa0
  1900. vperm2i128 \$0x31,$xb1,$xa1,$xb1
  1901. vperm2i128 \$0x20,$xb2,$xa2,$xa1
  1902. vperm2i128 \$0x31,$xb2,$xa2,$xb2
  1903. vperm2i128 \$0x20,$xb3,$xa3,$xa2
  1904. vperm2i128 \$0x31,$xb3,$xa3,$xb3
  1905. ___
  1906. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  1907. my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
  1908. $code.=<<___;
  1909. vmovdqa $xa0,0x00(%rsp) # offload $xaN
  1910. vmovdqa $xa1,0x20(%rsp)
  1911. vmovdqa 0x40(%rsp),$xc2 # $xa0
  1912. vmovdqa 0x60(%rsp),$xc3 # $xa1
  1913. vpaddd 0x180-0x200(%rax),$xc0,$xc0
  1914. vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
  1915. vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
  1916. vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
  1917. vpunpckldq $xc1,$xc0,$xt2
  1918. vpunpckldq $xc3,$xc2,$xt3
  1919. vpunpckhdq $xc1,$xc0,$xc0
  1920. vpunpckhdq $xc3,$xc2,$xc2
  1921. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  1922. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  1923. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  1924. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  1925. ___
  1926. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  1927. $code.=<<___;
  1928. vpaddd 0x200-0x200(%rax),$xd0,$xd0
  1929. vpaddd 0x220-0x200(%rax),$xd1,$xd1
  1930. vpaddd 0x240-0x200(%rax),$xd2,$xd2
  1931. vpaddd 0x260-0x200(%rax),$xd3,$xd3
  1932. vpunpckldq $xd1,$xd0,$xt2
  1933. vpunpckldq $xd3,$xd2,$xt3
  1934. vpunpckhdq $xd1,$xd0,$xd0
  1935. vpunpckhdq $xd3,$xd2,$xd2
  1936. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  1937. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  1938. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  1939. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  1940. ___
  1941. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  1942. $code.=<<___;
  1943. vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
  1944. vperm2i128 \$0x31,$xd0,$xc0,$xd0
  1945. vperm2i128 \$0x20,$xd1,$xc1,$xc0
  1946. vperm2i128 \$0x31,$xd1,$xc1,$xd1
  1947. vperm2i128 \$0x20,$xd2,$xc2,$xc1
  1948. vperm2i128 \$0x31,$xd2,$xc2,$xd2
  1949. vperm2i128 \$0x20,$xd3,$xc3,$xc2
  1950. vperm2i128 \$0x31,$xd3,$xc3,$xd3
  1951. ___
  1952. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  1953. ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
  1954. ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
  1955. ($xa0,$xa1)=($xt2,$xt3);
  1956. $code.=<<___;
  1957. vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
  1958. vmovdqa 0x20(%rsp),$xa1
  1959. cmp \$64*8,$len
  1960. jb .Ltail8x
  1961. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  1962. vpxor 0x20($inp),$xb0,$xb0
  1963. vpxor 0x40($inp),$xc0,$xc0
  1964. vpxor 0x60($inp),$xd0,$xd0
  1965. lea 0x80($inp),$inp # size optimization
  1966. vmovdqu $xa0,0x00($out)
  1967. vmovdqu $xb0,0x20($out)
  1968. vmovdqu $xc0,0x40($out)
  1969. vmovdqu $xd0,0x60($out)
  1970. lea 0x80($out),$out # size optimization
  1971. vpxor 0x00($inp),$xa1,$xa1
  1972. vpxor 0x20($inp),$xb1,$xb1
  1973. vpxor 0x40($inp),$xc1,$xc1
  1974. vpxor 0x60($inp),$xd1,$xd1
  1975. lea 0x80($inp),$inp # size optimization
  1976. vmovdqu $xa1,0x00($out)
  1977. vmovdqu $xb1,0x20($out)
  1978. vmovdqu $xc1,0x40($out)
  1979. vmovdqu $xd1,0x60($out)
  1980. lea 0x80($out),$out # size optimization
  1981. vpxor 0x00($inp),$xa2,$xa2
  1982. vpxor 0x20($inp),$xb2,$xb2
  1983. vpxor 0x40($inp),$xc2,$xc2
  1984. vpxor 0x60($inp),$xd2,$xd2
  1985. lea 0x80($inp),$inp # size optimization
  1986. vmovdqu $xa2,0x00($out)
  1987. vmovdqu $xb2,0x20($out)
  1988. vmovdqu $xc2,0x40($out)
  1989. vmovdqu $xd2,0x60($out)
  1990. lea 0x80($out),$out # size optimization
  1991. vpxor 0x00($inp),$xa3,$xa3
  1992. vpxor 0x20($inp),$xb3,$xb3
  1993. vpxor 0x40($inp),$xc3,$xc3
  1994. vpxor 0x60($inp),$xd3,$xd3
  1995. lea 0x80($inp),$inp # size optimization
  1996. vmovdqu $xa3,0x00($out)
  1997. vmovdqu $xb3,0x20($out)
  1998. vmovdqu $xc3,0x40($out)
  1999. vmovdqu $xd3,0x60($out)
  2000. lea 0x80($out),$out # size optimization
  2001. sub \$64*8,$len
  2002. jnz .Loop_outer8x
  2003. jmp .Ldone8x
  2004. .Ltail8x:
  2005. cmp \$448,$len
  2006. jae .L448_or_more8x
  2007. cmp \$384,$len
  2008. jae .L384_or_more8x
  2009. cmp \$320,$len
  2010. jae .L320_or_more8x
  2011. cmp \$256,$len
  2012. jae .L256_or_more8x
  2013. cmp \$192,$len
  2014. jae .L192_or_more8x
  2015. cmp \$128,$len
  2016. jae .L128_or_more8x
  2017. cmp \$64,$len
  2018. jae .L64_or_more8x
  2019. xor %r10,%r10
  2020. vmovdqa $xa0,0x00(%rsp)
  2021. vmovdqa $xb0,0x20(%rsp)
  2022. jmp .Loop_tail8x
  2023. .align 32
  2024. .L64_or_more8x:
  2025. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2026. vpxor 0x20($inp),$xb0,$xb0
  2027. vmovdqu $xa0,0x00($out)
  2028. vmovdqu $xb0,0x20($out)
  2029. je .Ldone8x
  2030. lea 0x40($inp),$inp # inp+=64*1
  2031. xor %r10,%r10
  2032. vmovdqa $xc0,0x00(%rsp)
  2033. lea 0x40($out),$out # out+=64*1
  2034. sub \$64,$len # len-=64*1
  2035. vmovdqa $xd0,0x20(%rsp)
  2036. jmp .Loop_tail8x
  2037. .align 32
  2038. .L128_or_more8x:
  2039. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2040. vpxor 0x20($inp),$xb0,$xb0
  2041. vpxor 0x40($inp),$xc0,$xc0
  2042. vpxor 0x60($inp),$xd0,$xd0
  2043. vmovdqu $xa0,0x00($out)
  2044. vmovdqu $xb0,0x20($out)
  2045. vmovdqu $xc0,0x40($out)
  2046. vmovdqu $xd0,0x60($out)
  2047. je .Ldone8x
  2048. lea 0x80($inp),$inp # inp+=64*2
  2049. xor %r10,%r10
  2050. vmovdqa $xa1,0x00(%rsp)
  2051. lea 0x80($out),$out # out+=64*2
  2052. sub \$128,$len # len-=64*2
  2053. vmovdqa $xb1,0x20(%rsp)
  2054. jmp .Loop_tail8x
  2055. .align 32
  2056. .L192_or_more8x:
  2057. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2058. vpxor 0x20($inp),$xb0,$xb0
  2059. vpxor 0x40($inp),$xc0,$xc0
  2060. vpxor 0x60($inp),$xd0,$xd0
  2061. vpxor 0x80($inp),$xa1,$xa1
  2062. vpxor 0xa0($inp),$xb1,$xb1
  2063. vmovdqu $xa0,0x00($out)
  2064. vmovdqu $xb0,0x20($out)
  2065. vmovdqu $xc0,0x40($out)
  2066. vmovdqu $xd0,0x60($out)
  2067. vmovdqu $xa1,0x80($out)
  2068. vmovdqu $xb1,0xa0($out)
  2069. je .Ldone8x
  2070. lea 0xc0($inp),$inp # inp+=64*3
  2071. xor %r10,%r10
  2072. vmovdqa $xc1,0x00(%rsp)
  2073. lea 0xc0($out),$out # out+=64*3
  2074. sub \$192,$len # len-=64*3
  2075. vmovdqa $xd1,0x20(%rsp)
  2076. jmp .Loop_tail8x
  2077. .align 32
  2078. .L256_or_more8x:
  2079. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2080. vpxor 0x20($inp),$xb0,$xb0
  2081. vpxor 0x40($inp),$xc0,$xc0
  2082. vpxor 0x60($inp),$xd0,$xd0
  2083. vpxor 0x80($inp),$xa1,$xa1
  2084. vpxor 0xa0($inp),$xb1,$xb1
  2085. vpxor 0xc0($inp),$xc1,$xc1
  2086. vpxor 0xe0($inp),$xd1,$xd1
  2087. vmovdqu $xa0,0x00($out)
  2088. vmovdqu $xb0,0x20($out)
  2089. vmovdqu $xc0,0x40($out)
  2090. vmovdqu $xd0,0x60($out)
  2091. vmovdqu $xa1,0x80($out)
  2092. vmovdqu $xb1,0xa0($out)
  2093. vmovdqu $xc1,0xc0($out)
  2094. vmovdqu $xd1,0xe0($out)
  2095. je .Ldone8x
  2096. lea 0x100($inp),$inp # inp+=64*4
  2097. xor %r10,%r10
  2098. vmovdqa $xa2,0x00(%rsp)
  2099. lea 0x100($out),$out # out+=64*4
  2100. sub \$256,$len # len-=64*4
  2101. vmovdqa $xb2,0x20(%rsp)
  2102. jmp .Loop_tail8x
  2103. .align 32
  2104. .L320_or_more8x:
  2105. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2106. vpxor 0x20($inp),$xb0,$xb0
  2107. vpxor 0x40($inp),$xc0,$xc0
  2108. vpxor 0x60($inp),$xd0,$xd0
  2109. vpxor 0x80($inp),$xa1,$xa1
  2110. vpxor 0xa0($inp),$xb1,$xb1
  2111. vpxor 0xc0($inp),$xc1,$xc1
  2112. vpxor 0xe0($inp),$xd1,$xd1
  2113. vpxor 0x100($inp),$xa2,$xa2
  2114. vpxor 0x120($inp),$xb2,$xb2
  2115. vmovdqu $xa0,0x00($out)
  2116. vmovdqu $xb0,0x20($out)
  2117. vmovdqu $xc0,0x40($out)
  2118. vmovdqu $xd0,0x60($out)
  2119. vmovdqu $xa1,0x80($out)
  2120. vmovdqu $xb1,0xa0($out)
  2121. vmovdqu $xc1,0xc0($out)
  2122. vmovdqu $xd1,0xe0($out)
  2123. vmovdqu $xa2,0x100($out)
  2124. vmovdqu $xb2,0x120($out)
  2125. je .Ldone8x
  2126. lea 0x140($inp),$inp # inp+=64*5
  2127. xor %r10,%r10
  2128. vmovdqa $xc2,0x00(%rsp)
  2129. lea 0x140($out),$out # out+=64*5
  2130. sub \$320,$len # len-=64*5
  2131. vmovdqa $xd2,0x20(%rsp)
  2132. jmp .Loop_tail8x
  2133. .align 32
  2134. .L384_or_more8x:
  2135. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2136. vpxor 0x20($inp),$xb0,$xb0
  2137. vpxor 0x40($inp),$xc0,$xc0
  2138. vpxor 0x60($inp),$xd0,$xd0
  2139. vpxor 0x80($inp),$xa1,$xa1
  2140. vpxor 0xa0($inp),$xb1,$xb1
  2141. vpxor 0xc0($inp),$xc1,$xc1
  2142. vpxor 0xe0($inp),$xd1,$xd1
  2143. vpxor 0x100($inp),$xa2,$xa2
  2144. vpxor 0x120($inp),$xb2,$xb2
  2145. vpxor 0x140($inp),$xc2,$xc2
  2146. vpxor 0x160($inp),$xd2,$xd2
  2147. vmovdqu $xa0,0x00($out)
  2148. vmovdqu $xb0,0x20($out)
  2149. vmovdqu $xc0,0x40($out)
  2150. vmovdqu $xd0,0x60($out)
  2151. vmovdqu $xa1,0x80($out)
  2152. vmovdqu $xb1,0xa0($out)
  2153. vmovdqu $xc1,0xc0($out)
  2154. vmovdqu $xd1,0xe0($out)
  2155. vmovdqu $xa2,0x100($out)
  2156. vmovdqu $xb2,0x120($out)
  2157. vmovdqu $xc2,0x140($out)
  2158. vmovdqu $xd2,0x160($out)
  2159. je .Ldone8x
  2160. lea 0x180($inp),$inp # inp+=64*6
  2161. xor %r10,%r10
  2162. vmovdqa $xa3,0x00(%rsp)
  2163. lea 0x180($out),$out # out+=64*6
  2164. sub \$384,$len # len-=64*6
  2165. vmovdqa $xb3,0x20(%rsp)
  2166. jmp .Loop_tail8x
  2167. .align 32
  2168. .L448_or_more8x:
  2169. vpxor 0x00($inp),$xa0,$xa0 # xor with input
  2170. vpxor 0x20($inp),$xb0,$xb0
  2171. vpxor 0x40($inp),$xc0,$xc0
  2172. vpxor 0x60($inp),$xd0,$xd0
  2173. vpxor 0x80($inp),$xa1,$xa1
  2174. vpxor 0xa0($inp),$xb1,$xb1
  2175. vpxor 0xc0($inp),$xc1,$xc1
  2176. vpxor 0xe0($inp),$xd1,$xd1
  2177. vpxor 0x100($inp),$xa2,$xa2
  2178. vpxor 0x120($inp),$xb2,$xb2
  2179. vpxor 0x140($inp),$xc2,$xc2
  2180. vpxor 0x160($inp),$xd2,$xd2
  2181. vpxor 0x180($inp),$xa3,$xa3
  2182. vpxor 0x1a0($inp),$xb3,$xb3
  2183. vmovdqu $xa0,0x00($out)
  2184. vmovdqu $xb0,0x20($out)
  2185. vmovdqu $xc0,0x40($out)
  2186. vmovdqu $xd0,0x60($out)
  2187. vmovdqu $xa1,0x80($out)
  2188. vmovdqu $xb1,0xa0($out)
  2189. vmovdqu $xc1,0xc0($out)
  2190. vmovdqu $xd1,0xe0($out)
  2191. vmovdqu $xa2,0x100($out)
  2192. vmovdqu $xb2,0x120($out)
  2193. vmovdqu $xc2,0x140($out)
  2194. vmovdqu $xd2,0x160($out)
  2195. vmovdqu $xa3,0x180($out)
  2196. vmovdqu $xb3,0x1a0($out)
  2197. je .Ldone8x
  2198. lea 0x1c0($inp),$inp # inp+=64*7
  2199. xor %r10,%r10
  2200. vmovdqa $xc3,0x00(%rsp)
  2201. lea 0x1c0($out),$out # out+=64*7
  2202. sub \$448,$len # len-=64*7
  2203. vmovdqa $xd3,0x20(%rsp)
  2204. .Loop_tail8x:
  2205. movzb ($inp,%r10),%eax
  2206. movzb (%rsp,%r10),%ecx
  2207. lea 1(%r10),%r10
  2208. xor %ecx,%eax
  2209. mov %al,-1($out,%r10)
  2210. dec $len
  2211. jnz .Loop_tail8x
  2212. .Ldone8x:
  2213. vzeroall
  2214. ___
  2215. $code.=<<___ if ($win64);
  2216. movaps -0xa8(%r9),%xmm6
  2217. movaps -0x98(%r9),%xmm7
  2218. movaps -0x88(%r9),%xmm8
  2219. movaps -0x78(%r9),%xmm9
  2220. movaps -0x68(%r9),%xmm10
  2221. movaps -0x58(%r9),%xmm11
  2222. movaps -0x48(%r9),%xmm12
  2223. movaps -0x38(%r9),%xmm13
  2224. movaps -0x28(%r9),%xmm14
  2225. movaps -0x18(%r9),%xmm15
  2226. ___
  2227. $code.=<<___;
  2228. lea (%r9),%rsp
  2229. .cfi_def_cfa_register %rsp
  2230. .L8x_epilogue:
  2231. ret
  2232. .cfi_endproc
  2233. .size ChaCha20_8x,.-ChaCha20_8x
  2234. ___
  2235. }
  2236. ########################################################################
  2237. # AVX512 code paths
  2238. if ($avx>2) {
  2239. # This one handles shorter inputs...
  2240. my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
  2241. my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
  2242. sub vpxord() # size optimization
  2243. { my $opcode = "vpxor"; # adhere to vpxor when possible
  2244. foreach (@_) {
  2245. if (/%([zy])mm([0-9]+)/ && ($1 eq "z" || $2>=16)) {
  2246. $opcode = "vpxord";
  2247. last;
  2248. }
  2249. }
  2250. $code .= "\t$opcode\t".join(',',reverse @_)."\n";
  2251. }
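# A minimal usage sketch (assumption: illustrative only). The helper sticks to
# the shorter VEX-encoded vpxor whenever the operands allow it and only falls
# back to the EVEX-only vpxord mnemonic for %zmm operands or %ymm registers
# numbered 16 and above:
#
#	&vpxord("%xmm4","%xmm4","%xmm5");	# appends "vpxor  %xmm5,%xmm4,%xmm4"
#	&vpxord("%ymm3","%ymm3","%ymm17");	# appends "vpxord %ymm17,%ymm3,%ymm3"
#	&vpxord("%zmm1","%zmm1","%zmm2");	# appends "vpxord %zmm2,%zmm1,%zmm1"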
  2252. sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
  2253. &vpaddd ($a,$a,$b);
  2254. &vpxord ($d,$d,$a);
  2255. &vprold ($d,$d,16);
  2256. &vpaddd ($c,$c,$d);
  2257. &vpxord ($b,$b,$c);
  2258. &vprold ($b,$b,12);
  2259. &vpaddd ($a,$a,$b);
  2260. &vpxord ($d,$d,$a);
  2261. &vprold ($d,$d,8);
  2262. &vpaddd ($c,$c,$d);
  2263. &vpxord ($b,$b,$c);
  2264. &vprold ($b,$b,7);
  2265. }
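# For reference, the sequence above is the standard ChaCha quarter-round on
# every 32-bit lane, with each rotate done by a single vprold where the
# SSE/AVX2 paths need a shift/shift/or sequence or a vpshufb table. A scalar
# Perl sketch (assumption: illustrative only, not used by the generator):
#
#	sub quarter {
#		my ($a,$b,$c,$d) = @_;		# 32-bit words
#		my $rol = sub { (($_[0] << $_[1]) | ($_[0] >> (32 - $_[1]))) & 0xffffffff };
#		$a = ($a + $b) & 0xffffffff; $d = $rol->($d ^ $a, 16);
#		$c = ($c + $d) & 0xffffffff; $b = $rol->($b ^ $c, 12);
#		$a = ($a + $b) & 0xffffffff; $d = $rol->($d ^ $a,  8);
#		$c = ($c + $d) & 0xffffffff; $b = $rol->($b ^ $c,  7);
#		($a,$b,$c,$d);
#	}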
  2266. my $xframe = $win64 ? 160+8 : 8;
  2267. $code.=<<___;
  2268. .type ChaCha20_avx512,\@function,5
  2269. .align 32
  2270. ChaCha20_avx512:
  2271. .cfi_startproc
  2272. .LChaCha20_avx512:
  2273. mov %rsp,%r9 # frame pointer
  2274. .cfi_def_cfa_register %r9
  2275. cmp \$512,$len
  2276. ja .LChaCha20_16x
  2277. sub \$64+$xframe,%rsp
  2278. ___
  2279. $code.=<<___ if ($win64);
  2280. movaps %xmm6,-0xa8(%r9)
  2281. movaps %xmm7,-0x98(%r9)
  2282. movaps %xmm8,-0x88(%r9)
  2283. movaps %xmm9,-0x78(%r9)
  2284. movaps %xmm10,-0x68(%r9)
  2285. movaps %xmm11,-0x58(%r9)
  2286. movaps %xmm12,-0x48(%r9)
  2287. movaps %xmm13,-0x38(%r9)
  2288. movaps %xmm14,-0x28(%r9)
  2289. movaps %xmm15,-0x18(%r9)
  2290. .Lavx512_body:
  2291. ___
  2292. $code.=<<___;
  2293. vbroadcasti32x4 .Lsigma(%rip),$a
  2294. vbroadcasti32x4 ($key),$b
  2295. vbroadcasti32x4 16($key),$c
  2296. vbroadcasti32x4 ($counter),$d
  2297. vmovdqa32 $a,$a_
  2298. vmovdqa32 $b,$b_
  2299. vmovdqa32 $c,$c_
  2300. vpaddd .Lzeroz(%rip),$d,$d
  2301. vmovdqa32 .Lfourz(%rip),$fourz
  2302. mov \$10,$counter # reuse $counter
  2303. vmovdqa32 $d,$d_
  2304. jmp .Loop_avx512
  2305. .align 16
  2306. .Loop_outer_avx512:
  2307. vmovdqa32 $a_,$a
  2308. vmovdqa32 $b_,$b
  2309. vmovdqa32 $c_,$c
  2310. vpaddd $fourz,$d_,$d
  2311. mov \$10,$counter
  2312. vmovdqa32 $d,$d_
  2313. jmp .Loop_avx512
  2314. .align 32
  2315. .Loop_avx512:
  2316. ___
  2317. &AVX512ROUND();
  2318. &vpshufd ($c,$c,0b01001110);
  2319. &vpshufd ($b,$b,0b00111001);
  2320. &vpshufd ($d,$d,0b10010011);
  2321. &AVX512ROUND();
  2322. &vpshufd ($c,$c,0b01001110);
  2323. &vpshufd ($b,$b,0b10010011);
  2324. &vpshufd ($d,$d,0b00111001);
  2325. &dec ($counter);
  2326. &jnz (".Loop_avx512");
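# The vpshufd immediates above rotate the four 32-bit lanes of 'b', 'c' and
# 'd' within each 128-bit block so that the second AVX512ROUND operates on the
# diagonals, and then restore the original lane order. Lane selection is
# dst[i] = src[(imm >> 2*i) & 3], so (assumption: explanatory note only):
#
#	0b00111001 (0x39) -> lanes (1,2,3,0)	# shift by one position
#	0b01001110 (0x4e) -> lanes (2,3,0,1)	# shift by two positions
#	0b10010011 (0x93) -> lanes (3,0,1,2)	# shift by three positions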
  2327. $code.=<<___;
  2328. vpaddd $a_,$a,$a
  2329. vpaddd $b_,$b,$b
  2330. vpaddd $c_,$c,$c
  2331. vpaddd $d_,$d,$d
  2332. sub \$64,$len
  2333. jb .Ltail64_avx512
  2334. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2335. vpxor 0x10($inp),%x#$b,$t1
  2336. vpxor 0x20($inp),%x#$c,$t2
  2337. vpxor 0x30($inp),%x#$d,$t3
  2338. lea 0x40($inp),$inp # inp+=64
  2339. vmovdqu $t0,0x00($out) # write output
  2340. vmovdqu $t1,0x10($out)
  2341. vmovdqu $t2,0x20($out)
  2342. vmovdqu $t3,0x30($out)
  2343. lea 0x40($out),$out # out+=64
  2344. jz .Ldone_avx512
  2345. vextracti32x4 \$1,$a,$t0
  2346. vextracti32x4 \$1,$b,$t1
  2347. vextracti32x4 \$1,$c,$t2
  2348. vextracti32x4 \$1,$d,$t3
  2349. sub \$64,$len
  2350. jb .Ltail_avx512
  2351. vpxor 0x00($inp),$t0,$t0 # xor with input
  2352. vpxor 0x10($inp),$t1,$t1
  2353. vpxor 0x20($inp),$t2,$t2
  2354. vpxor 0x30($inp),$t3,$t3
  2355. lea 0x40($inp),$inp # inp+=64
  2356. vmovdqu $t0,0x00($out) # write output
  2357. vmovdqu $t1,0x10($out)
  2358. vmovdqu $t2,0x20($out)
  2359. vmovdqu $t3,0x30($out)
  2360. lea 0x40($out),$out # out+=64
  2361. jz .Ldone_avx512
  2362. vextracti32x4 \$2,$a,$t0
  2363. vextracti32x4 \$2,$b,$t1
  2364. vextracti32x4 \$2,$c,$t2
  2365. vextracti32x4 \$2,$d,$t3
  2366. sub \$64,$len
  2367. jb .Ltail_avx512
  2368. vpxor 0x00($inp),$t0,$t0 # xor with input
  2369. vpxor 0x10($inp),$t1,$t1
  2370. vpxor 0x20($inp),$t2,$t2
  2371. vpxor 0x30($inp),$t3,$t3
  2372. lea 0x40($inp),$inp # inp+=64
  2373. vmovdqu $t0,0x00($out) # write output
  2374. vmovdqu $t1,0x10($out)
  2375. vmovdqu $t2,0x20($out)
  2376. vmovdqu $t3,0x30($out)
  2377. lea 0x40($out),$out # out+=64
  2378. jz .Ldone_avx512
  2379. vextracti32x4 \$3,$a,$t0
  2380. vextracti32x4 \$3,$b,$t1
  2381. vextracti32x4 \$3,$c,$t2
  2382. vextracti32x4 \$3,$d,$t3
  2383. sub \$64,$len
  2384. jb .Ltail_avx512
  2385. vpxor 0x00($inp),$t0,$t0 # xor with input
  2386. vpxor 0x10($inp),$t1,$t1
  2387. vpxor 0x20($inp),$t2,$t2
  2388. vpxor 0x30($inp),$t3,$t3
  2389. lea 0x40($inp),$inp # inp+=64
  2390. vmovdqu $t0,0x00($out) # write output
  2391. vmovdqu $t1,0x10($out)
  2392. vmovdqu $t2,0x20($out)
  2393. vmovdqu $t3,0x30($out)
  2394. lea 0x40($out),$out # out+=64
  2395. jnz .Loop_outer_avx512
  2396. jmp .Ldone_avx512
  2397. .align 16
  2398. .Ltail64_avx512:
  2399. vmovdqa %x#$a,0x00(%rsp)
  2400. vmovdqa %x#$b,0x10(%rsp)
  2401. vmovdqa %x#$c,0x20(%rsp)
  2402. vmovdqa %x#$d,0x30(%rsp)
  2403. add \$64,$len
  2404. jmp .Loop_tail_avx512
  2405. .align 16
  2406. .Ltail_avx512:
  2407. vmovdqa $t0,0x00(%rsp)
  2408. vmovdqa $t1,0x10(%rsp)
  2409. vmovdqa $t2,0x20(%rsp)
  2410. vmovdqa $t3,0x30(%rsp)
  2411. add \$64,$len
  2412. .Loop_tail_avx512:
  2413. movzb ($inp,$counter),%eax
  2414. movzb (%rsp,$counter),%ecx
  2415. lea 1($counter),$counter
  2416. xor %ecx,%eax
  2417. mov %al,-1($out,$counter)
  2418. dec $len
  2419. jnz .Loop_tail_avx512
  2420. vmovdqu32 $a_,0x00(%rsp)
  2421. .Ldone_avx512:
  2422. vzeroall
  2423. ___
  2424. $code.=<<___ if ($win64);
  2425. movaps -0xa8(%r9),%xmm6
  2426. movaps -0x98(%r9),%xmm7
  2427. movaps -0x88(%r9),%xmm8
  2428. movaps -0x78(%r9),%xmm9
  2429. movaps -0x68(%r9),%xmm10
  2430. movaps -0x58(%r9),%xmm11
  2431. movaps -0x48(%r9),%xmm12
  2432. movaps -0x38(%r9),%xmm13
  2433. movaps -0x28(%r9),%xmm14
  2434. movaps -0x18(%r9),%xmm15
  2435. ___
  2436. $code.=<<___;
  2437. lea (%r9),%rsp
  2438. .cfi_def_cfa_register %rsp
  2439. .Lavx512_epilogue:
  2440. ret
  2441. .cfi_endproc
  2442. .size ChaCha20_avx512,.-ChaCha20_avx512
  2443. ___
  2444. map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz);
  2445. $code.=<<___;
  2446. .type ChaCha20_avx512vl,\@function,5
  2447. .align 32
  2448. ChaCha20_avx512vl:
  2449. .cfi_startproc
  2450. .LChaCha20_avx512vl:
  2451. mov %rsp,%r9 # frame pointer
  2452. .cfi_def_cfa_register %r9
  2453. cmp \$128,$len
  2454. ja .LChaCha20_8xvl
  2455. sub \$64+$xframe,%rsp
  2456. ___
  2457. $code.=<<___ if ($win64);
  2458. movaps %xmm6,-0xa8(%r9)
  2459. movaps %xmm7,-0x98(%r9)
  2460. movaps %xmm8,-0x88(%r9)
  2461. movaps %xmm9,-0x78(%r9)
  2462. movaps %xmm10,-0x68(%r9)
  2463. movaps %xmm11,-0x58(%r9)
  2464. movaps %xmm12,-0x48(%r9)
  2465. movaps %xmm13,-0x38(%r9)
  2466. movaps %xmm14,-0x28(%r9)
  2467. movaps %xmm15,-0x18(%r9)
  2468. .Lavx512vl_body:
  2469. ___
  2470. $code.=<<___;
  2471. vbroadcasti128 .Lsigma(%rip),$a
  2472. vbroadcasti128 ($key),$b
  2473. vbroadcasti128 16($key),$c
  2474. vbroadcasti128 ($counter),$d
  2475. vmovdqa32 $a,$a_
  2476. vmovdqa32 $b,$b_
  2477. vmovdqa32 $c,$c_
  2478. vpaddd .Lzeroz(%rip),$d,$d
  2479. vmovdqa32 .Ltwoy(%rip),$fourz
  2480. mov \$10,$counter # reuse $counter
  2481. vmovdqa32 $d,$d_
  2482. jmp .Loop_avx512vl
  2483. .align 16
  2484. .Loop_outer_avx512vl:
  2485. vmovdqa32 $c_,$c
  2486. vpaddd $fourz,$d_,$d
  2487. mov \$10,$counter
  2488. vmovdqa32 $d,$d_
  2489. jmp .Loop_avx512vl
  2490. .align 32
  2491. .Loop_avx512vl:
  2492. ___
  2493. &AVX512ROUND();
  2494. &vpshufd ($c,$c,0b01001110);
  2495. &vpshufd ($b,$b,0b00111001);
  2496. &vpshufd ($d,$d,0b10010011);
  2497. &AVX512ROUND();
  2498. &vpshufd ($c,$c,0b01001110);
  2499. &vpshufd ($b,$b,0b10010011);
  2500. &vpshufd ($d,$d,0b00111001);
  2501. &dec ($counter);
  2502. &jnz (".Loop_avx512vl");
  2503. $code.=<<___;
  2504. vpaddd $a_,$a,$a
  2505. vpaddd $b_,$b,$b
  2506. vpaddd $c_,$c,$c
  2507. vpaddd $d_,$d,$d
  2508. sub \$64,$len
  2509. jb .Ltail64_avx512vl
  2510. vpxor 0x00($inp),%x#$a,$t0 # xor with input
  2511. vpxor 0x10($inp),%x#$b,$t1
  2512. vpxor 0x20($inp),%x#$c,$t2
  2513. vpxor 0x30($inp),%x#$d,$t3
  2514. lea 0x40($inp),$inp # inp+=64
  2515. vmovdqu $t0,0x00($out) # write output
  2516. vmovdqu $t1,0x10($out)
  2517. vmovdqu $t2,0x20($out)
  2518. vmovdqu $t3,0x30($out)
  2519. lea 0x40($out),$out # out+=64
  2520. jz .Ldone_avx512vl
  2521. vextracti128 \$1,$a,$t0
  2522. vextracti128 \$1,$b,$t1
  2523. vextracti128 \$1,$c,$t2
  2524. vextracti128 \$1,$d,$t3
  2525. sub \$64,$len
  2526. jb .Ltail_avx512vl
  2527. vpxor 0x00($inp),$t0,$t0 # xor with input
  2528. vpxor 0x10($inp),$t1,$t1
  2529. vpxor 0x20($inp),$t2,$t2
  2530. vpxor 0x30($inp),$t3,$t3
  2531. lea 0x40($inp),$inp # inp+=64
  2532. vmovdqu $t0,0x00($out) # write output
  2533. vmovdqu $t1,0x10($out)
  2534. vmovdqu $t2,0x20($out)
  2535. vmovdqu $t3,0x30($out)
  2536. lea 0x40($out),$out # out+=64
  2537. vmovdqa32 $a_,$a
  2538. vmovdqa32 $b_,$b
  2539. jnz .Loop_outer_avx512vl
  2540. jmp .Ldone_avx512vl
  2541. .align 16
  2542. .Ltail64_avx512vl:
  2543. vmovdqa %x#$a,0x00(%rsp)
  2544. vmovdqa %x#$b,0x10(%rsp)
  2545. vmovdqa %x#$c,0x20(%rsp)
  2546. vmovdqa %x#$d,0x30(%rsp)
  2547. add \$64,$len
  2548. jmp .Loop_tail_avx512vl
  2549. .align 16
  2550. .Ltail_avx512vl:
  2551. vmovdqa $t0,0x00(%rsp)
  2552. vmovdqa $t1,0x10(%rsp)
  2553. vmovdqa $t2,0x20(%rsp)
  2554. vmovdqa $t3,0x30(%rsp)
  2555. add \$64,$len
  2556. .Loop_tail_avx512vl:
  2557. movzb ($inp,$counter),%eax
  2558. movzb (%rsp,$counter),%ecx
  2559. lea 1($counter),$counter
  2560. xor %ecx,%eax
  2561. mov %al,-1($out,$counter)
  2562. dec $len
  2563. jnz .Loop_tail_avx512vl
  2564. vmovdqu32 $a_,0x00(%rsp)
  2565. vmovdqu32 $a_,0x20(%rsp)
  2566. .Ldone_avx512vl:
  2567. vzeroall
  2568. ___
  2569. $code.=<<___ if ($win64);
  2570. movaps -0xa8(%r9),%xmm6
  2571. movaps -0x98(%r9),%xmm7
  2572. movaps -0x88(%r9),%xmm8
  2573. movaps -0x78(%r9),%xmm9
  2574. movaps -0x68(%r9),%xmm10
  2575. movaps -0x58(%r9),%xmm11
  2576. movaps -0x48(%r9),%xmm12
  2577. movaps -0x38(%r9),%xmm13
  2578. movaps -0x28(%r9),%xmm14
  2579. movaps -0x18(%r9),%xmm15
  2580. ___
  2581. $code.=<<___;
  2582. lea (%r9),%rsp
  2583. .cfi_def_cfa_register %rsp
  2584. .Lavx512vl_epilogue:
  2585. ret
  2586. .cfi_endproc
  2587. .size ChaCha20_avx512vl,.-ChaCha20_avx512vl
  2588. ___
  2589. }
  2590. if ($avx>2) {
  2591. # This one handles longer inputs...
  2592. my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2593. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
  2594. my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2595. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
  2596. my @key=map("%zmm$_",(16..31));
  2597. my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
  2598. sub AVX512_lane_ROUND {
  2599. my ($a0,$b0,$c0,$d0)=@_;
  2600. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  2601. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  2602. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  2603. my @x=map("\"$_\"",@xx);
  2604. (
  2605. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
  2606. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
  2607. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
  2608. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
  2609. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2610. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2611. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2612. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2613. "&vprold (@x[$d0],@x[$d0],16)",
  2614. "&vprold (@x[$d1],@x[$d1],16)",
  2615. "&vprold (@x[$d2],@x[$d2],16)",
  2616. "&vprold (@x[$d3],@x[$d3],16)",
  2617. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2618. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2619. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2620. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2621. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2622. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2623. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2624. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2625. "&vprold (@x[$b0],@x[$b0],12)",
  2626. "&vprold (@x[$b1],@x[$b1],12)",
  2627. "&vprold (@x[$b2],@x[$b2],12)",
  2628. "&vprold (@x[$b3],@x[$b3],12)",
  2629. "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
  2630. "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
  2631. "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
  2632. "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
  2633. "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
  2634. "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
  2635. "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
  2636. "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
  2637. "&vprold (@x[$d0],@x[$d0],8)",
  2638. "&vprold (@x[$d1],@x[$d1],8)",
  2639. "&vprold (@x[$d2],@x[$d2],8)",
  2640. "&vprold (@x[$d3],@x[$d3],8)",
  2641. "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
  2642. "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
  2643. "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
  2644. "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
  2645. "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
  2646. "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
  2647. "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
  2648. "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
  2649. "&vprold (@x[$b0],@x[$b0],7)",
  2650. "&vprold (@x[$b1],@x[$b1],7)",
  2651. "&vprold (@x[$b2],@x[$b2],7)",
  2652. "&vprold (@x[$b3],@x[$b3],7)"
  2653. );
  2654. }
  2655. my $xframe = $win64 ? 0xa8 : 8;
  2656. $code.=<<___;
  2657. .type ChaCha20_16x,\@function,5
  2658. .align 32
  2659. ChaCha20_16x:
  2660. .cfi_startproc
  2661. .LChaCha20_16x:
  2662. mov %rsp,%r9 # frame register
  2663. .cfi_def_cfa_register %r9
  2664. sub \$64+$xframe,%rsp
  2665. and \$-64,%rsp
  2666. ___
  2667. $code.=<<___ if ($win64);
  2668. movaps %xmm6,-0xa8(%r9)
  2669. movaps %xmm7,-0x98(%r9)
  2670. movaps %xmm8,-0x88(%r9)
  2671. movaps %xmm9,-0x78(%r9)
  2672. movaps %xmm10,-0x68(%r9)
  2673. movaps %xmm11,-0x58(%r9)
  2674. movaps %xmm12,-0x48(%r9)
  2675. movaps %xmm13,-0x38(%r9)
  2676. movaps %xmm14,-0x28(%r9)
  2677. movaps %xmm15,-0x18(%r9)
  2678. .L16x_body:
  2679. ___
  2680. $code.=<<___;
  2681. vzeroupper
  2682. lea .Lsigma(%rip),%r10
  2683. vbroadcasti32x4 (%r10),$xa3 # key[0]
  2684. vbroadcasti32x4 ($key),$xb3 # key[1]
  2685. vbroadcasti32x4 16($key),$xc3 # key[2]
  2686. vbroadcasti32x4 ($counter),$xd3 # key[3]
  2687. vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
  2688. vpshufd \$0x55,$xa3,$xa1
  2689. vpshufd \$0xaa,$xa3,$xa2
  2690. vpshufd \$0xff,$xa3,$xa3
  2691. vmovdqa64 $xa0,@key[0]
  2692. vmovdqa64 $xa1,@key[1]
  2693. vmovdqa64 $xa2,@key[2]
  2694. vmovdqa64 $xa3,@key[3]
  2695. vpshufd \$0x00,$xb3,$xb0
  2696. vpshufd \$0x55,$xb3,$xb1
  2697. vpshufd \$0xaa,$xb3,$xb2
  2698. vpshufd \$0xff,$xb3,$xb3
  2699. vmovdqa64 $xb0,@key[4]
  2700. vmovdqa64 $xb1,@key[5]
  2701. vmovdqa64 $xb2,@key[6]
  2702. vmovdqa64 $xb3,@key[7]
  2703. vpshufd \$0x00,$xc3,$xc0
  2704. vpshufd \$0x55,$xc3,$xc1
  2705. vpshufd \$0xaa,$xc3,$xc2
  2706. vpshufd \$0xff,$xc3,$xc3
  2707. vmovdqa64 $xc0,@key[8]
  2708. vmovdqa64 $xc1,@key[9]
  2709. vmovdqa64 $xc2,@key[10]
  2710. vmovdqa64 $xc3,@key[11]
  2711. vpshufd \$0x00,$xd3,$xd0
  2712. vpshufd \$0x55,$xd3,$xd1
  2713. vpshufd \$0xaa,$xd3,$xd2
  2714. vpshufd \$0xff,$xd3,$xd3
  2715. vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
  2716. vmovdqa64 $xd0,@key[12]
  2717. vmovdqa64 $xd1,@key[13]
  2718. vmovdqa64 $xd2,@key[14]
  2719. vmovdqa64 $xd3,@key[15]
  2720. mov \$10,%eax
  2721. jmp .Loop16x
  2722. .align 32
  2723. .Loop_outer16x:
  2724. vpbroadcastd 0(%r10),$xa0 # reload key
  2725. vpbroadcastd 4(%r10),$xa1
  2726. vpbroadcastd 8(%r10),$xa2
  2727. vpbroadcastd 12(%r10),$xa3
  2728. vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters
  2729. vmovdqa64 @key[4],$xb0
  2730. vmovdqa64 @key[5],$xb1
  2731. vmovdqa64 @key[6],$xb2
  2732. vmovdqa64 @key[7],$xb3
  2733. vmovdqa64 @key[8],$xc0
  2734. vmovdqa64 @key[9],$xc1
  2735. vmovdqa64 @key[10],$xc2
  2736. vmovdqa64 @key[11],$xc3
  2737. vmovdqa64 @key[12],$xd0
  2738. vmovdqa64 @key[13],$xd1
  2739. vmovdqa64 @key[14],$xd2
  2740. vmovdqa64 @key[15],$xd3
  2741. vmovdqa64 $xa0,@key[0]
  2742. vmovdqa64 $xa1,@key[1]
  2743. vmovdqa64 $xa2,@key[2]
  2744. vmovdqa64 $xa3,@key[3]
  2745. mov \$10,%eax
  2746. jmp .Loop16x
  2747. .align 32
  2748. .Loop16x:
  2749. ___
  2750. foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
  2751. foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
  2752. $code.=<<___;
  2753. dec %eax
  2754. jnz .Loop16x
  2755. vpaddd @key[0],$xa0,$xa0 # accumulate key
  2756. vpaddd @key[1],$xa1,$xa1
  2757. vpaddd @key[2],$xa2,$xa2
  2758. vpaddd @key[3],$xa3,$xa3
  2759. vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
  2760. vpunpckldq $xa3,$xa2,$xt3
  2761. vpunpckhdq $xa1,$xa0,$xa0
  2762. vpunpckhdq $xa3,$xa2,$xa2
  2763. vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
  2764. vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
  2765. vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
  2766. vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
  2767. ___
  2768. ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
  2769. $code.=<<___;
  2770. vpaddd @key[4],$xb0,$xb0
  2771. vpaddd @key[5],$xb1,$xb1
  2772. vpaddd @key[6],$xb2,$xb2
  2773. vpaddd @key[7],$xb3,$xb3
  2774. vpunpckldq $xb1,$xb0,$xt2
  2775. vpunpckldq $xb3,$xb2,$xt3
  2776. vpunpckhdq $xb1,$xb0,$xb0
  2777. vpunpckhdq $xb3,$xb2,$xb2
  2778. vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
  2779. vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
  2780. vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
  2781. vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
  2782. ___
  2783. ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
  2784. $code.=<<___;
  2785. vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further
  2786. vshufi32x4 \$0xee,$xb0,$xa0,$xb0
  2787. vshufi32x4 \$0x44,$xb1,$xa1,$xa0
  2788. vshufi32x4 \$0xee,$xb1,$xa1,$xb1
  2789. vshufi32x4 \$0x44,$xb2,$xa2,$xa1
  2790. vshufi32x4 \$0xee,$xb2,$xa2,$xb2
  2791. vshufi32x4 \$0x44,$xb3,$xa3,$xa2
  2792. vshufi32x4 \$0xee,$xb3,$xa3,$xb3
  2793. ___
  2794. ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
  2795. $code.=<<___;
  2796. vpaddd @key[8],$xc0,$xc0
  2797. vpaddd @key[9],$xc1,$xc1
  2798. vpaddd @key[10],$xc2,$xc2
  2799. vpaddd @key[11],$xc3,$xc3
  2800. vpunpckldq $xc1,$xc0,$xt2
  2801. vpunpckldq $xc3,$xc2,$xt3
  2802. vpunpckhdq $xc1,$xc0,$xc0
  2803. vpunpckhdq $xc3,$xc2,$xc2
  2804. vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
  2805. vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
  2806. vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
  2807. vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
  2808. ___
  2809. ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
  2810. $code.=<<___;
  2811. vpaddd @key[12],$xd0,$xd0
  2812. vpaddd @key[13],$xd1,$xd1
  2813. vpaddd @key[14],$xd2,$xd2
  2814. vpaddd @key[15],$xd3,$xd3
  2815. vpunpckldq $xd1,$xd0,$xt2
  2816. vpunpckldq $xd3,$xd2,$xt3
  2817. vpunpckhdq $xd1,$xd0,$xd0
  2818. vpunpckhdq $xd3,$xd2,$xd2
  2819. vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
  2820. vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
  2821. vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
  2822. vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
  2823. ___
  2824. ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
  2825. $code.=<<___;
  2826. vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further
  2827. vshufi32x4 \$0xee,$xd0,$xc0,$xd0
  2828. vshufi32x4 \$0x44,$xd1,$xc1,$xc0
  2829. vshufi32x4 \$0xee,$xd1,$xc1,$xd1
  2830. vshufi32x4 \$0x44,$xd2,$xc2,$xc1
  2831. vshufi32x4 \$0xee,$xd2,$xc2,$xd2
  2832. vshufi32x4 \$0x44,$xd3,$xc3,$xc2
  2833. vshufi32x4 \$0xee,$xd3,$xc3,$xd3
  2834. ___
  2835. ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
  2836. $code.=<<___;
  2837. vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further
  2838. vshufi32x4 \$0xdd,$xc0,$xa0,$xa0
  2839. vshufi32x4 \$0x88,$xd0,$xb0,$xc0
  2840. vshufi32x4 \$0xdd,$xd0,$xb0,$xd0
  2841. vshufi32x4 \$0x88,$xc1,$xa1,$xt1
  2842. vshufi32x4 \$0xdd,$xc1,$xa1,$xa1
  2843. vshufi32x4 \$0x88,$xd1,$xb1,$xc1
  2844. vshufi32x4 \$0xdd,$xd1,$xb1,$xd1
  2845. vshufi32x4 \$0x88,$xc2,$xa2,$xt2
  2846. vshufi32x4 \$0xdd,$xc2,$xa2,$xa2
  2847. vshufi32x4 \$0x88,$xd2,$xb2,$xc2
  2848. vshufi32x4 \$0xdd,$xd2,$xb2,$xd2
  2849. vshufi32x4 \$0x88,$xc3,$xa3,$xt3
  2850. vshufi32x4 \$0xdd,$xc3,$xa3,$xa3
  2851. vshufi32x4 \$0x88,$xd3,$xb3,$xc3
  2852. vshufi32x4 \$0xdd,$xd3,$xb3,$xd3
  2853. ___
  2854. ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
  2855. ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
  2856. ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
  2857. $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
  2858. ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  2859. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
  2860. $code.=<<___;
  2861. cmp \$64*16,$len
  2862. jb .Ltail16x
  2863. vpxord 0x00($inp),$xa0,$xa0 # xor with input
  2864. vpxord 0x40($inp),$xb0,$xb0
  2865. vpxord 0x80($inp),$xc0,$xc0
  2866. vpxord 0xc0($inp),$xd0,$xd0
  2867. vmovdqu32 $xa0,0x00($out)
  2868. vmovdqu32 $xb0,0x40($out)
  2869. vmovdqu32 $xc0,0x80($out)
  2870. vmovdqu32 $xd0,0xc0($out)
  2871. vpxord 0x100($inp),$xa1,$xa1
  2872. vpxord 0x140($inp),$xb1,$xb1
  2873. vpxord 0x180($inp),$xc1,$xc1
  2874. vpxord 0x1c0($inp),$xd1,$xd1
  2875. vmovdqu32 $xa1,0x100($out)
  2876. vmovdqu32 $xb1,0x140($out)
  2877. vmovdqu32 $xc1,0x180($out)
  2878. vmovdqu32 $xd1,0x1c0($out)
  2879. vpxord 0x200($inp),$xa2,$xa2
  2880. vpxord 0x240($inp),$xb2,$xb2
  2881. vpxord 0x280($inp),$xc2,$xc2
  2882. vpxord 0x2c0($inp),$xd2,$xd2
  2883. vmovdqu32 $xa2,0x200($out)
  2884. vmovdqu32 $xb2,0x240($out)
  2885. vmovdqu32 $xc2,0x280($out)
  2886. vmovdqu32 $xd2,0x2c0($out)
  2887. vpxord 0x300($inp),$xa3,$xa3
  2888. vpxord 0x340($inp),$xb3,$xb3
  2889. vpxord 0x380($inp),$xc3,$xc3
  2890. vpxord 0x3c0($inp),$xd3,$xd3
  2891. lea 0x400($inp),$inp
  2892. vmovdqu32 $xa3,0x300($out)
  2893. vmovdqu32 $xb3,0x340($out)
  2894. vmovdqu32 $xc3,0x380($out)
  2895. vmovdqu32 $xd3,0x3c0($out)
  2896. lea 0x400($out),$out
  2897. sub \$64*16,$len
  2898. jnz .Loop_outer16x
  2899. jmp .Ldone16x
  2900. .align 32
  2901. .Ltail16x:
  2902. xor %r10,%r10
  2903. sub $inp,$out
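# out now holds the distance out-inp, so every "vmovdqu32 ...(out,inp)" below
# still writes to the original output position while only inp advances; e.g.
# with out=0x2000 and inp=0x1000 the base becomes 0x1000, the first store
# lands at 0x1000+0x1000=0x2000, and after "lea 64(inp),inp" the next one at
# 0x2040.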
  2904. cmp \$64*1,$len
  2905. jb .Less_than_64_16x
  2906. vpxord ($inp),$xa0,$xa0 # xor with input
  2907. vmovdqu32 $xa0,($out,$inp)
  2908. je .Ldone16x
  2909. vmovdqa32 $xb0,$xa0
  2910. lea 64($inp),$inp
  2911. cmp \$64*2,$len
  2912. jb .Less_than_64_16x
  2913. vpxord ($inp),$xb0,$xb0
  2914. vmovdqu32 $xb0,($out,$inp)
  2915. je .Ldone16x
  2916. vmovdqa32 $xc0,$xa0
  2917. lea 64($inp),$inp
  2918. cmp \$64*3,$len
  2919. jb .Less_than_64_16x
  2920. vpxord ($inp),$xc0,$xc0
  2921. vmovdqu32 $xc0,($out,$inp)
  2922. je .Ldone16x
  2923. vmovdqa32 $xd0,$xa0
  2924. lea 64($inp),$inp
  2925. cmp \$64*4,$len
  2926. jb .Less_than_64_16x
  2927. vpxord ($inp),$xd0,$xd0
  2928. vmovdqu32 $xd0,($out,$inp)
  2929. je .Ldone16x
  2930. vmovdqa32 $xa1,$xa0
  2931. lea 64($inp),$inp
  2932. cmp \$64*5,$len
  2933. jb .Less_than_64_16x
  2934. vpxord ($inp),$xa1,$xa1
  2935. vmovdqu32 $xa1,($out,$inp)
  2936. je .Ldone16x
  2937. vmovdqa32 $xb1,$xa0
  2938. lea 64($inp),$inp
  2939. cmp \$64*6,$len
  2940. jb .Less_than_64_16x
  2941. vpxord ($inp),$xb1,$xb1
  2942. vmovdqu32 $xb1,($out,$inp)
  2943. je .Ldone16x
  2944. vmovdqa32 $xc1,$xa0
  2945. lea 64($inp),$inp
  2946. cmp \$64*7,$len
  2947. jb .Less_than_64_16x
  2948. vpxord ($inp),$xc1,$xc1
  2949. vmovdqu32 $xc1,($out,$inp)
  2950. je .Ldone16x
  2951. vmovdqa32 $xd1,$xa0
  2952. lea 64($inp),$inp
  2953. cmp \$64*8,$len
  2954. jb .Less_than_64_16x
  2955. vpxord ($inp),$xd1,$xd1
  2956. vmovdqu32 $xd1,($out,$inp)
  2957. je .Ldone16x
  2958. vmovdqa32 $xa2,$xa0
  2959. lea 64($inp),$inp
  2960. cmp \$64*9,$len
  2961. jb .Less_than_64_16x
  2962. vpxord ($inp),$xa2,$xa2
  2963. vmovdqu32 $xa2,($out,$inp)
  2964. je .Ldone16x
  2965. vmovdqa32 $xb2,$xa0
  2966. lea 64($inp),$inp
  2967. cmp \$64*10,$len
  2968. jb .Less_than_64_16x
  2969. vpxord ($inp),$xb2,$xb2
  2970. vmovdqu32 $xb2,($out,$inp)
  2971. je .Ldone16x
  2972. vmovdqa32 $xc2,$xa0
  2973. lea 64($inp),$inp
  2974. cmp \$64*11,$len
  2975. jb .Less_than_64_16x
  2976. vpxord ($inp),$xc2,$xc2
  2977. vmovdqu32 $xc2,($out,$inp)
  2978. je .Ldone16x
  2979. vmovdqa32 $xd2,$xa0
  2980. lea 64($inp),$inp
  2981. cmp \$64*12,$len
  2982. jb .Less_than_64_16x
  2983. vpxord ($inp),$xd2,$xd2
  2984. vmovdqu32 $xd2,($out,$inp)
  2985. je .Ldone16x
  2986. vmovdqa32 $xa3,$xa0
  2987. lea 64($inp),$inp
  2988. cmp \$64*13,$len
  2989. jb .Less_than_64_16x
  2990. vpxord ($inp),$xa3,$xa3
  2991. vmovdqu32 $xa3,($out,$inp)
  2992. je .Ldone16x
  2993. vmovdqa32 $xb3,$xa0
  2994. lea 64($inp),$inp
  2995. cmp \$64*14,$len
  2996. jb .Less_than_64_16x
  2997. vpxord ($inp),$xb3,$xb3
  2998. vmovdqu32 $xb3,($out,$inp)
  2999. je .Ldone16x
  3000. vmovdqa32 $xc3,$xa0
  3001. lea 64($inp),$inp
  3002. cmp \$64*15,$len
  3003. jb .Less_than_64_16x
  3004. vpxord ($inp),$xc3,$xc3
  3005. vmovdqu32 $xc3,($out,$inp)
  3006. je .Ldone16x
  3007. vmovdqa32 $xd3,$xa0
  3008. lea 64($inp),$inp
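
# Fewer than 64 bytes remain: spill the candidate keystream block to the stack,
# restore $out to an absolute pointer, and xor the tail byte by byte; the
# stacked keystream copy is cleared before leaving.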
.Less_than_64_16x:
	vmovdqa32	$xa0,0x00(%rsp)
	lea	($out,$inp),$out
	and	\$63,$len

.Loop_tail16x:
	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	lea	1(%r10),%r10
	xor	%ecx,%eax
	mov	%al,-1($out,%r10)
	dec	$len
	jnz	.Loop_tail16x

	vpxord	$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15
___
$code.=<<___;
	lea	(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_16x,.-ChaCha20_16x
___
# switch to %ymm domain
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15));
@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
@key=map("%ymm$_",(16..31));
($xt0,$xt1,$xt2,$xt3)=@key[0..3];
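
# ChaCha20_8xvl keeps eight interleaved ChaCha20 states entirely in registers:
# the working words live in %ymm0-15 while the pre-broadcast key/counter
# material is parked in %ymm16-31 (@key), so the stack is only touched for the
# sub-64-byte tail. Each outer iteration emits 8 blocks (512 bytes) of stream.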
$code.=<<___;
.type	ChaCha20_8xvl,\@function,5
.align	32
ChaCha20_8xvl:
.cfi_startproc
.LChaCha20_8xvl:
	mov	%rsp,%r9		# frame register
.cfi_def_cfa_register	%r9
	sub	\$64+$xframe,%rsp
	and	\$-64,%rsp
___
$code.=<<___ if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)
.L8xvl_body:
___
$code.=<<___;
	vzeroupper

	lea	.Lsigma(%rip),%r10
	vbroadcasti128	(%r10),$xa3		# key[0]
	vbroadcasti128	($key),$xb3		# key[1]
	vbroadcasti128	16($key),$xc3		# key[2]
	vbroadcasti128	($counter),$xd3		# key[3]

	vpshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd	\$0x55,$xa3,$xa1
	vpshufd	\$0xaa,$xa3,$xa2
	vpshufd	\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd	\$0x00,$xb3,$xb0
	vpshufd	\$0x55,$xb3,$xb1
	vpshufd	\$0xaa,$xb3,$xb2
	vpshufd	\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd	\$0x00,$xc3,$xc0
	vpshufd	\$0x55,$xc3,$xc1
	vpshufd	\$0xaa,$xc3,$xc2
	vpshufd	\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd	\$0x00,$xd3,$xd0
	vpshufd	\$0x55,$xd3,$xd1
	vpshufd	\$0xaa,$xd3,$xd2
	vpshufd	\$0xff,$xd3,$xd3
	vpaddd	.Lincy(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]

	mov	\$10,%eax
	jmp	.Loop8xvl

.align	32
.Loop_outer8xvl:
	#vpbroadcastd	0(%r10),$xa0		# reload key
	#vpbroadcastd	4(%r10),$xa1
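	# $xa0 and $xa1 were already refreshed at the bottom of the stream-output
	# path (vpbroadcastd 0/4(%r10) just before looping back here), so only
	# sigma words 2 and 3 need reloading.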
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd	.Leight(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	mov	\$10,%eax
	jmp	.Loop8xvl

.align	32
.Loop8xvl:
___
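# One pass through .Loop8xvl is a ChaCha double round: the first call applies
# the quarter-rounds down the columns (0,4,8,12), the second down the diagonals
# (0,5,10,15); with %eax preset to 10 that gives the full 20 rounds.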
foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec	%eax
	jnz	.Loop8xvl

	vpaddd	@key[0],$xa0,$xa0	# accumulate key
	vpaddd	@key[1],$xa1,$xa1
	vpaddd	@key[2],$xa2,$xa2
	vpaddd	@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd	@key[4],$xb0,$xb0
	vpaddd	@key[5],$xb1,$xb1
	vpaddd	@key[6],$xb2,$xb2
	vpaddd	@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4	\$0,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$3,$xb0,$xa0,$xb0
	vshufi32x4	\$0,$xb1,$xa1,$xa0
	vshufi32x4	\$3,$xb1,$xa1,$xb1
	vshufi32x4	\$0,$xb2,$xa2,$xa1
	vshufi32x4	\$3,$xb2,$xa2,$xb2
	vshufi32x4	\$0,$xb3,$xa3,$xa2
	vshufi32x4	\$3,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd	@key[8],$xc0,$xc0
	vpaddd	@key[9],$xc1,$xc1
	vpaddd	@key[10],$xc2,$xc2
	vpaddd	@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd	@key[12],$xd0,$xd0
	vpaddd	@key[13],$xd1,$xd1
	vpaddd	@key[14],$xd2,$xd2
	vpaddd	@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xd0,$xc0,$xd0
	vperm2i128	\$0x20,$xd1,$xc1,$xc0
	vperm2i128	\$0x31,$xd1,$xc1,$xd1
	vperm2i128	\$0x20,$xd2,$xc2,$xc1
	vperm2i128	\$0x31,$xd2,$xc2,$xd2
	vperm2i128	\$0x20,$xd3,$xc3,$xc2
	vperm2i128	\$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
$code.=<<___;
	cmp	\$64*8,$len
	jb	.Ltail8xvl

	mov	\$0x80,%eax		# size optimization
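	# EVEX forms (vpxord/vmovdqu32) are required wherever the renamed
	# register now lives in %ymm16..31; the shorter VEX forms
	# (vpxor/vmovdqu) are kept for registers still in %ymm0..15.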
	vpxord	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	lea	($inp,%rax),$inp	# size optimization
	vmovdqu32	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	lea	($out,%rax),$out	# size optimization

	vpxor	0x00($inp),$xa1,$xa1
	vpxor	0x20($inp),$xb1,$xb1
	vpxor	0x40($inp),$xc1,$xc1
	vpxor	0x60($inp),$xd1,$xd1
	lea	($inp,%rax),$inp	# size optimization
	vmovdqu	$xa1,0x00($out)
	vmovdqu	$xb1,0x20($out)
	vmovdqu	$xc1,0x40($out)
	vmovdqu	$xd1,0x60($out)
	lea	($out,%rax),$out	# size optimization

	vpxord	0x00($inp),$xa2,$xa2
	vpxor	0x20($inp),$xb2,$xb2
	vpxor	0x40($inp),$xc2,$xc2
	vpxor	0x60($inp),$xd2,$xd2
	lea	($inp,%rax),$inp	# size optimization
	vmovdqu32	$xa2,0x00($out)
	vmovdqu	$xb2,0x20($out)
	vmovdqu	$xc2,0x40($out)
	vmovdqu	$xd2,0x60($out)
	lea	($out,%rax),$out	# size optimization

	vpxor	0x00($inp),$xa3,$xa3
	vpxor	0x20($inp),$xb3,$xb3
	vpxor	0x40($inp),$xc3,$xc3
	vpxor	0x60($inp),$xd3,$xd3
	lea	($inp,%rax),$inp	# size optimization
	vmovdqu	$xa3,0x00($out)
	vmovdqu	$xb3,0x20($out)
	vmovdqu	$xc3,0x40($out)
	vmovdqu	$xd3,0x60($out)
	lea	($out,%rax),$out	# size optimization

	vpbroadcastd	0(%r10),%ymm0		# reload key
	vpbroadcastd	4(%r10),%ymm1

	sub	\$64*8,$len
	jnz	.Loop_outer8xvl

	jmp	.Ldone8xvl

.align	32
.Ltail8xvl:
	vmovdqa64	$xa0,%ymm8		# size optimization
___
$xa0 = "%ymm8";
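# The block that sat in an EVEX-only register (%ymm16..31) was copied down to
# %ymm8 above; $xa0 is re-pointed at it so the tail path can stay with
# VEX-encoded vpxor/vmovdqu/vmovdqa throughout.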
$code.=<<___;
	xor	%r10,%r10
	sub	$inp,$out
	cmp	\$64*1,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vmovdqu	$xa0,0x00($out,$inp)
	vmovdqu	$xb0,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xc0,$xa0
	vmovdqa	$xd0,$xb0
	lea	64($inp),$inp

	cmp	\$64*2,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xc0,$xc0
	vpxor	0x20($inp),$xd0,$xd0
	vmovdqu	$xc0,0x00($out,$inp)
	vmovdqu	$xd0,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xa1,$xa0
	vmovdqa	$xb1,$xb0
	lea	64($inp),$inp

	cmp	\$64*3,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xa1,$xa1
	vpxor	0x20($inp),$xb1,$xb1
	vmovdqu	$xa1,0x00($out,$inp)
	vmovdqu	$xb1,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xc1,$xa0
	vmovdqa	$xd1,$xb0
	lea	64($inp),$inp

	cmp	\$64*4,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xc1,$xc1
	vpxor	0x20($inp),$xd1,$xd1
	vmovdqu	$xc1,0x00($out,$inp)
	vmovdqu	$xd1,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa32	$xa2,$xa0
	vmovdqa	$xb2,$xb0
	lea	64($inp),$inp

	cmp	\$64*5,$len
	jb	.Less_than_64_8xvl
	vpxord	0x00($inp),$xa2,$xa2
	vpxor	0x20($inp),$xb2,$xb2
	vmovdqu32	$xa2,0x00($out,$inp)
	vmovdqu	$xb2,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xc2,$xa0
	vmovdqa	$xd2,$xb0
	lea	64($inp),$inp

	cmp	\$64*6,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xc2,$xc2
	vpxor	0x20($inp),$xd2,$xd2
	vmovdqu	$xc2,0x00($out,$inp)
	vmovdqu	$xd2,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xa3,$xa0
	vmovdqa	$xb3,$xb0
	lea	64($inp),$inp

	cmp	\$64*7,$len
	jb	.Less_than_64_8xvl
	vpxor	0x00($inp),$xa3,$xa3
	vpxor	0x20($inp),$xb3,$xb3
	vmovdqu	$xa3,0x00($out,$inp)
	vmovdqu	$xb3,0x20($out,$inp)
	je	.Ldone8xvl
	vmovdqa	$xc3,$xa0
	vmovdqa	$xd3,$xb0
	lea	64($inp),$inp

.Less_than_64_8xvl:
	vmovdqa	$xa0,0x00(%rsp)
	vmovdqa	$xb0,0x20(%rsp)
	lea	($out,$inp),$out
	and	\$63,$len

.Loop_tail8xvl:
	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	lea	1(%r10),%r10
	xor	%ecx,%eax
	mov	%al,-1($out,%r10)
	dec	$len
	jnz	.Loop_tail8xvl

	vpxor	$xa0,$xa0,$xa0
	vmovdqa	$xa0,0x00(%rsp)
	vmovdqa	$xa0,0x20(%rsp)

.Ldone8xvl:
	vzeroall
___
$code.=<<___ if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15
___
$code.=<<___;
	lea	(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L8xvl_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_8xvl,.-ChaCha20_8xvl
___
}
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler
.type	simd_handler,\@abi-omnipotent
.align	16
simd_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	mov	8(%r11),%ecx		# HandlerData[2]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	neg	%rcx
	lea	-8(%rax,%rcx),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	neg	%ecx
	shr	\$3,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	simd_handler,.-simd_handler
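
# The .pdata entries below tie each function's code range to its unwind info;
# each .xdata record names the handler and, for simd_handler, supplies the
# prologue/epilogue labels plus the byte count of saved %xmm registers below
# the %r9 frame pointer that must be copied back into the CONTEXT.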
.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_128
	.rva	.LSEH_end_ChaCha20_128
	.rva	.LSEH_info_ChaCha20_128

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_avx512vl
	.rva	.LSEH_end_ChaCha20_avx512vl
	.rva	.LSEH_info_ChaCha20_avx512vl

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x

	.rva	.LSEH_begin_ChaCha20_8xvl
	.rva	.LSEH_end_ChaCha20_8xvl
	.rva	.LSEH_info_ChaCha20_8xvl
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lssse3_body,.Lssse3_epilogue
	.long	0x20,0

.LSEH_info_ChaCha20_128:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L128_body,.L128_epilogue
	.long	0x60,0

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L4x_body,.L4x_epilogue
	.long	0xa0,0
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L4xop_body,.L4xop_epilogue	# HandlerData[]
	.long	0xa0,0
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L8x_body,.L8x_epilogue		# HandlerData[]
	.long	0xa0,0
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lavx512_body,.Lavx512_epilogue	# HandlerData[]
	.long	0x20,0

.LSEH_info_ChaCha20_avx512vl:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.Lavx512vl_body,.Lavx512vl_epilogue	# HandlerData[]
	.long	0x20,0

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L16x_body,.L16x_epilogue	# HandlerData[]
	.long	0xa0,0

.LSEH_info_ChaCha20_8xvl:
	.byte	9,0,0,0
	.rva	simd_handler
	.rva	.L8xvl_body,.L8xvl_epilogue	# HandlerData[]
	.long	0xa0,0
___
}
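
# Final output pass: evaluate any `...` expressions and apply the "down-shift"
# substitution, which rewrites tokens of the form "%x#%ymmN" (or "%x#%zmmN")
# to the plain "%xmmN" register name.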
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";