ecp_nistz256-x86_64.pl

  1. #!/usr/bin/env perl
  2. ##############################################################################
  3. #
  4. # Copyright 2014 Intel Corporation
  5. #
  6. # Licensed under the Apache License, Version 2.0 (the "License");
  7. # you may not use this file except in compliance with the License.
  8. # You may obtain a copy of the License at
  9. #
  10. #     http://www.apache.org/licenses/LICENSE-2.0
  11. #
  12. # Unless required by applicable law or agreed to in writing, software
  13. # distributed under the License is distributed on an "AS IS" BASIS,
  14. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. # See the License for the specific language governing permissions and
  16. # limitations under the License.
  17. #
  18. ##############################################################################
  19. #
  20. # Developers and authors:
  21. # Shay Gueron (1, 2), and Vlad Krasnov (1)
  22. # (1) Intel Corporation, Israel Development Center
  23. # (2) University of Haifa
  24. # Reference:
  25. # S. Gueron and V. Krasnov, "Fast Prime Field Elliptic Curve Cryptography
  26. #     with 256 Bit Primes"
  27. #
  28. ##############################################################################
  29. # Further optimization by <appro@openssl.org>:
  30. #
  31. #                  this/original  with/without -DECP_NISTZ256_ASM(*)
  32. # Opteron          +12-49%        +110-150%
  33. # Bulldozer        +14-45%        +175-210%
  34. # P4               +18-46%        n/a :-(
  35. # Westmere         +12-34%        +80-87%
  36. # Sandy Bridge     +9-35%         +110-120%
  37. # Ivy Bridge       +9-35%         +110-125%
  38. # Haswell          +8-37%         +140-160%
  39. # Broadwell        +18-58%        +145-210%
  40. # Atom             +15-50%        +130-180%
  41. # VIA Nano         +43-160%       +300-480%
  42. #
  43. # (*) "without -DECP_NISTZ256_ASM" refers to build with
  44. # "enable-ec_nistp_64_gcc_128";
  45. #
  46. # Ranges denote minimum and maximum improvement coefficients depending
  47. # on benchmark. Lower coefficients are for ECDSA sign, the relatively fastest
  48. # server-side operation. Keep in mind that +100% means 2x improvement.
  49. $flavour = shift;
  50. $output = shift;
  51. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  52. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  53. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  54. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  55. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  56. die "can't locate x86_64-xlate.pl";
  57. open OUT,"| \"$^X\" $xlate $flavour $output";
  58. *STDOUT=*OUT;
  59. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  60. =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
  61. $avx = ($1>=2.19) + ($1>=2.22);
  62. $addx = ($1>=2.23);
  63. }
  64. if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
  65. `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
  66. $avx = ($1>=2.09) + ($1>=2.10);
  67. $addx = ($1>=2.10);
  68. }
  69. if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
  70. `ml64 2>&1` =~ /Version ([0-9]+)\./) {
  71. $avx = ($1>=10) + ($1>=11);
  72. $addx = ($1>=12);
  73. }
  74. if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
  75. my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
  76. $avx = ($ver>=3.0) + ($ver>=3.01);
  77. $addx = ($ver>=3.03);
  78. }
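# Note: $avx and $addx gate which code paths are emitted below.  $addx selects
# the MULX/ADCX/ADOX (BMI2+ADX) Montgomery routines and $avx>1 enables the
# AVX2 gather subroutines; both require an assembler recent enough to encode
# those instructions, hence the version probing above.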
  79. $code.=<<___;
  80. .text
  81. .extern OPENSSL_ia32cap_P
  82. # The NIST P-256 prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1, least significant quadword first
  83. .align 64
  84. .Lpoly:
  85. .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
  86. # 2^512 mod P precomputed for NIST P256 polynomial
  87. .LRR:
  88. .quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
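# Per-lane 32-bit constants, broadcast across a whole vector; used by the
# select loops in the gather routines below.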
  89. .LOne:
  90. .long 1,1,1,1,1,1,1,1
  91. .LTwo:
  92. .long 2,2,2,2,2,2,2,2
  93. .LThree:
  94. .long 3,3,3,3,3,3,3,3
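# 1 in the Montgomery domain, i.e. 2^256 mod p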
  95. .LONE_mont:
  96. .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
  97. ___
  98. {
  99. ################################################################################
  100. # void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
  101. my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
  102. my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
  103. my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");
  104. $code.=<<___;
  105. .globl ecp_nistz256_mul_by_2
  106. .type ecp_nistz256_mul_by_2,\@function,2
  107. .align 64
  108. ecp_nistz256_mul_by_2:
  109. push %r12
  110. push %r13
  111. mov 8*0($a_ptr), $a0
  112. mov 8*1($a_ptr), $a1
  113. add $a0, $a0 # a0:a3+a0:a3
  114. mov 8*2($a_ptr), $a2
  115. adc $a1, $a1
  116. mov 8*3($a_ptr), $a3
  117. lea .Lpoly(%rip), $a_ptr
  118. mov $a0, $t0
  119. adc $a2, $a2
  120. adc $a3, $a3
  121. mov $a1, $t1
  122. sbb $t4, $t4
  123. sub 8*0($a_ptr), $a0
  124. mov $a2, $t2
  125. sbb 8*1($a_ptr), $a1
  126. sbb 8*2($a_ptr), $a2
  127. mov $a3, $t3
  128. sbb 8*3($a_ptr), $a3
  129. test $t4, $t4
  130. cmovz $t0, $a0
  131. cmovz $t1, $a1
  132. mov $a0, 8*0($r_ptr)
  133. cmovz $t2, $a2
  134. mov $a1, 8*1($r_ptr)
  135. cmovz $t3, $a3
  136. mov $a2, 8*2($r_ptr)
  137. mov $a3, 8*3($r_ptr)
  138. pop %r13
  139. pop %r12
  140. ret
  141. .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
  142. ################################################################################
  143. # void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
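# Computes a/2 mod p: p is added so the intermediate sum is even whenever the
# input is odd (the original value is kept otherwise), and the 257-bit result
# is then shifted right by one bit.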
  144. .globl ecp_nistz256_div_by_2
  145. .type ecp_nistz256_div_by_2,\@function,2
  146. .align 32
  147. ecp_nistz256_div_by_2:
  148. push %r12
  149. push %r13
  150. mov 8*0($a_ptr), $a0
  151. mov 8*1($a_ptr), $a1
  152. mov 8*2($a_ptr), $a2
  153. mov $a0, $t0
  154. mov 8*3($a_ptr), $a3
  155. lea .Lpoly(%rip), $a_ptr
  156. mov $a1, $t1
  157. xor $t4, $t4
  158. add 8*0($a_ptr), $a0
  159. mov $a2, $t2
  160. adc 8*1($a_ptr), $a1
  161. adc 8*2($a_ptr), $a2
  162. mov $a3, $t3
  163. adc 8*3($a_ptr), $a3
  164. adc \$0, $t4
  165. xor $a_ptr, $a_ptr # borrow $a_ptr
  166. test \$1, $t0
  167. cmovz $t0, $a0
  168. cmovz $t1, $a1
  169. cmovz $t2, $a2
  170. cmovz $t3, $a3
  171. cmovz $a_ptr, $t4
  172. mov $a1, $t0 # a0:a3>>1
  173. shr \$1, $a0
  174. shl \$63, $t0
  175. mov $a2, $t1
  176. shr \$1, $a1
  177. or $t0, $a0
  178. shl \$63, $t1
  179. mov $a3, $t2
  180. shr \$1, $a2
  181. or $t1, $a1
  182. shl \$63, $t2
  183. shr \$1, $a3
  184. shl \$63, $t4
  185. or $t2, $a2
  186. or $t4, $a3
  187. mov $a0, 8*0($r_ptr)
  188. mov $a1, 8*1($r_ptr)
  189. mov $a2, 8*2($r_ptr)
  190. mov $a3, 8*3($r_ptr)
  191. pop %r13
  192. pop %r12
  193. ret
  194. .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
  195. ################################################################################
  196. # void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
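# Computes 3*a mod p as a doubling followed by one more addition of a, with a
# conditional subtraction of p after each step.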
  197. .globl ecp_nistz256_mul_by_3
  198. .type ecp_nistz256_mul_by_3,\@function,2
  199. .align 32
  200. ecp_nistz256_mul_by_3:
  201. push %r12
  202. push %r13
  203. mov 8*0($a_ptr), $a0
  204. xor $t4, $t4
  205. mov 8*1($a_ptr), $a1
  206. add $a0, $a0 # a0:a3+a0:a3
  207. mov 8*2($a_ptr), $a2
  208. adc $a1, $a1
  209. mov 8*3($a_ptr), $a3
  210. mov $a0, $t0
  211. adc $a2, $a2
  212. adc $a3, $a3
  213. mov $a1, $t1
  214. adc \$0, $t4
  215. sub \$-1, $a0
  216. mov $a2, $t2
  217. sbb .Lpoly+8*1(%rip), $a1
  218. sbb \$0, $a2
  219. mov $a3, $t3
  220. sbb .Lpoly+8*3(%rip), $a3
  221. test $t4, $t4
  222. cmovz $t0, $a0
  223. cmovz $t1, $a1
  224. cmovz $t2, $a2
  225. cmovz $t3, $a3
  226. xor $t4, $t4
  227. add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
  228. adc 8*1($a_ptr), $a1
  229. mov $a0, $t0
  230. adc 8*2($a_ptr), $a2
  231. adc 8*3($a_ptr), $a3
  232. mov $a1, $t1
  233. adc \$0, $t4
  234. sub \$-1, $a0
  235. mov $a2, $t2
  236. sbb .Lpoly+8*1(%rip), $a1
  237. sbb \$0, $a2
  238. mov $a3, $t3
  239. sbb .Lpoly+8*3(%rip), $a3
  240. test $t4, $t4
  241. cmovz $t0, $a0
  242. cmovz $t1, $a1
  243. mov $a0, 8*0($r_ptr)
  244. cmovz $t2, $a2
  245. mov $a1, 8*1($r_ptr)
  246. cmovz $t3, $a3
  247. mov $a2, 8*2($r_ptr)
  248. mov $a3, 8*3($r_ptr)
  249. pop %r13
  250. pop %r12
  251. ret
  252. .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
  253. ################################################################################
  254. # void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
  255. .globl ecp_nistz256_add
  256. .type ecp_nistz256_add,\@function,3
  257. .align 32
  258. ecp_nistz256_add:
  259. push %r12
  260. push %r13
  261. mov 8*0($a_ptr), $a0
  262. xor $t4, $t4
  263. mov 8*1($a_ptr), $a1
  264. mov 8*2($a_ptr), $a2
  265. mov 8*3($a_ptr), $a3
  266. lea .Lpoly(%rip), $a_ptr
  267. add 8*0($b_ptr), $a0
  268. adc 8*1($b_ptr), $a1
  269. mov $a0, $t0
  270. adc 8*2($b_ptr), $a2
  271. adc 8*3($b_ptr), $a3
  272. mov $a1, $t1
  273. adc \$0, $t4
  274. sub 8*0($a_ptr), $a0
  275. mov $a2, $t2
  276. sbb 8*1($a_ptr), $a1
  277. sbb 8*2($a_ptr), $a2
  278. mov $a3, $t3
  279. sbb 8*3($a_ptr), $a3
  280. test $t4, $t4
  281. cmovz $t0, $a0
  282. cmovz $t1, $a1
  283. mov $a0, 8*0($r_ptr)
  284. cmovz $t2, $a2
  285. mov $a1, 8*1($r_ptr)
  286. cmovz $t3, $a3
  287. mov $a2, 8*2($r_ptr)
  288. mov $a3, 8*3($r_ptr)
  289. pop %r13
  290. pop %r12
  291. ret
  292. .size ecp_nistz256_add,.-ecp_nistz256_add
  293. ################################################################################
  294. # void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
  295. .globl ecp_nistz256_sub
  296. .type ecp_nistz256_sub,\@function,3
  297. .align 32
  298. ecp_nistz256_sub:
  299. push %r12
  300. push %r13
  301. mov 8*0($a_ptr), $a0
  302. xor $t4, $t4
  303. mov 8*1($a_ptr), $a1
  304. mov 8*2($a_ptr), $a2
  305. mov 8*3($a_ptr), $a3
  306. lea .Lpoly(%rip), $a_ptr
  307. sub 8*0($b_ptr), $a0
  308. sbb 8*1($b_ptr), $a1
  309. mov $a0, $t0
  310. sbb 8*2($b_ptr), $a2
  311. sbb 8*3($b_ptr), $a3
  312. mov $a1, $t1
  313. sbb \$0, $t4
  314. add 8*0($a_ptr), $a0
  315. mov $a2, $t2
  316. adc 8*1($a_ptr), $a1
  317. adc 8*2($a_ptr), $a2
  318. mov $a3, $t3
  319. adc 8*3($a_ptr), $a3
  320. test $t4, $t4
  321. cmovz $t0, $a0
  322. cmovz $t1, $a1
  323. mov $a0, 8*0($r_ptr)
  324. cmovz $t2, $a2
  325. mov $a1, 8*1($r_ptr)
  326. cmovz $t3, $a3
  327. mov $a2, 8*2($r_ptr)
  328. mov $a3, 8*3($r_ptr)
  329. pop %r13
  330. pop %r12
  331. ret
  332. .size ecp_nistz256_sub,.-ecp_nistz256_sub
  333. ################################################################################
  334. # void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
  335. .globl ecp_nistz256_neg
  336. .type ecp_nistz256_neg,\@function,2
  337. .align 32
  338. ecp_nistz256_neg:
  339. push %r12
  340. push %r13
  341. xor $a0, $a0
  342. xor $a1, $a1
  343. xor $a2, $a2
  344. xor $a3, $a3
  345. xor $t4, $t4
  346. sub 8*0($a_ptr), $a0
  347. sbb 8*1($a_ptr), $a1
  348. sbb 8*2($a_ptr), $a2
  349. mov $a0, $t0
  350. sbb 8*3($a_ptr), $a3
  351. lea .Lpoly(%rip), $a_ptr
  352. mov $a1, $t1
  353. sbb \$0, $t4
  354. add 8*0($a_ptr), $a0
  355. mov $a2, $t2
  356. adc 8*1($a_ptr), $a1
  357. adc 8*2($a_ptr), $a2
  358. mov $a3, $t3
  359. adc 8*3($a_ptr), $a3
  360. test $t4, $t4
  361. cmovz $t0, $a0
  362. cmovz $t1, $a1
  363. mov $a0, 8*0($r_ptr)
  364. cmovz $t2, $a2
  365. mov $a1, 8*1($r_ptr)
  366. cmovz $t3, $a3
  367. mov $a2, 8*2($r_ptr)
  368. mov $a3, 8*3($r_ptr)
  369. pop %r13
  370. pop %r12
  371. ret
  372. .size ecp_nistz256_neg,.-ecp_nistz256_neg
  373. ___
  374. }
  375. {
  376. my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
  377. my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
  378. my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
  379. my ($poly1,$poly3)=($acc6,$acc7);
  380. $code.=<<___;
  381. ################################################################################
  382. # void ecp_nistz256_to_mont(
  383. # uint64_t res[4],
  384. # uint64_t in[4]);
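# Conversion into the Montgomery domain is just a Montgomery multiplication
# by RR = 2^512 mod p, so this entry point loads .LRR as the second operand
# and jumps to .Lmul_mont.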
  385. .globl ecp_nistz256_to_mont
  386. .type ecp_nistz256_to_mont,\@function,2
  387. .align 32
  388. ecp_nistz256_to_mont:
  389. ___
  390. $code.=<<___ if ($addx);
  391. mov \$0x80100, %ecx
  392. and OPENSSL_ia32cap_P+8(%rip), %ecx
  393. ___
  394. $code.=<<___;
  395. lea .LRR(%rip), $b_org
  396. jmp .Lmul_mont
  397. .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
  398. ################################################################################
  399. # void ecp_nistz256_mul_mont(
  400. # uint64_t res[4],
  401. # uint64_t a[4],
  402. # uint64_t b[4]);
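# Computes a*b*2^-256 mod p (a Montgomery product); operands and result are
# four 64-bit quadwords, least significant first.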
  403. .globl ecp_nistz256_mul_mont
  404. .type ecp_nistz256_mul_mont,\@function,3
  405. .align 32
  406. ecp_nistz256_mul_mont:
  407. ___
  408. $code.=<<___ if ($addx);
  409. mov \$0x80100, %ecx
  410. and OPENSSL_ia32cap_P+8(%rip), %ecx
  411. ___
  412. $code.=<<___;
  413. .Lmul_mont:
  414. push %rbp
  415. push %rbx
  416. push %r12
  417. push %r13
  418. push %r14
  419. push %r15
  420. ___
  421. $code.=<<___ if ($addx);
  422. cmp \$0x80100, %ecx
  423. je .Lmul_montx
  424. ___
  425. $code.=<<___;
  426. mov $b_org, $b_ptr
  427. mov 8*0($b_org), %rax
  428. mov 8*0($a_ptr), $acc1
  429. mov 8*1($a_ptr), $acc2
  430. mov 8*2($a_ptr), $acc3
  431. mov 8*3($a_ptr), $acc4
  432. call __ecp_nistz256_mul_montq
  433. ___
  434. $code.=<<___ if ($addx);
  435. jmp .Lmul_mont_done
  436. .align 32
  437. .Lmul_montx:
  438. mov $b_org, $b_ptr
  439. mov 8*0($b_org), %rdx
  440. mov 8*0($a_ptr), $acc1
  441. mov 8*1($a_ptr), $acc2
  442. mov 8*2($a_ptr), $acc3
  443. mov 8*3($a_ptr), $acc4
  444. lea -128($a_ptr), $a_ptr # control u-op density
  445. call __ecp_nistz256_mul_montx
  446. ___
  447. $code.=<<___;
  448. .Lmul_mont_done:
  449. pop %r15
  450. pop %r14
  451. pop %r13
  452. pop %r12
  453. pop %rbx
  454. pop %rbp
  455. ret
  456. .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
  457. .type __ecp_nistz256_mul_montq,\@abi-omnipotent
  458. .align 32
  459. __ecp_nistz256_mul_montq:
  460. ########################################################################
  461. # Multiply a by b[0]
  462. mov %rax, $t1
  463. mulq $acc1
  464. mov .Lpoly+8*1(%rip),$poly1
  465. mov %rax, $acc0
  466. mov $t1, %rax
  467. mov %rdx, $acc1
  468. mulq $acc2
  469. mov .Lpoly+8*3(%rip),$poly3
  470. add %rax, $acc1
  471. mov $t1, %rax
  472. adc \$0, %rdx
  473. mov %rdx, $acc2
  474. mulq $acc3
  475. add %rax, $acc2
  476. mov $t1, %rax
  477. adc \$0, %rdx
  478. mov %rdx, $acc3
  479. mulq $acc4
  480. add %rax, $acc3
  481. mov $acc0, %rax
  482. adc \$0, %rdx
  483. xor $acc5, $acc5
  484. mov %rdx, $acc4
  485. ########################################################################
  486. # First reduction step
  487. # Basically now we want to multiply acc[0] by p256,
  488. # and add the result to the acc.
  489. # Due to the special form of p256 we do some optimizations
  490. #
  491. # acc[0] x p256[0..1] = acc[0] x 2^96 - acc[0]
  492. # then we add acc[0] and get acc[0] x 2^96
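# Concretely: p[0] = 2^64-1, so the Montgomery multiplier for this limb is
# acc[0] itself, and p[0]+2^64*p[1] = 2^96-1.  Adding acc[0]*(2^96-1) plus
# the cancelled acc[0] therefore only needs the 32-bit shifts below, while
# the mulq against $poly3 supplies acc[0]*p[3] (p[2] is zero).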
  493. mov $acc0, $t1
  494. shl \$32, $acc0
  495. mulq $poly3
  496. shr \$32, $t1
  497. add $acc0, $acc1 # +=acc[0]<<96
  498. adc $t1, $acc2
  499. adc %rax, $acc3
  500. mov 8*1($b_ptr), %rax
  501. adc %rdx, $acc4
  502. adc \$0, $acc5
  503. xor $acc0, $acc0
  504. ########################################################################
  505. # Multiply by b[1]
  506. mov %rax, $t1
  507. mulq 8*0($a_ptr)
  508. add %rax, $acc1
  509. mov $t1, %rax
  510. adc \$0, %rdx
  511. mov %rdx, $t0
  512. mulq 8*1($a_ptr)
  513. add $t0, $acc2
  514. adc \$0, %rdx
  515. add %rax, $acc2
  516. mov $t1, %rax
  517. adc \$0, %rdx
  518. mov %rdx, $t0
  519. mulq 8*2($a_ptr)
  520. add $t0, $acc3
  521. adc \$0, %rdx
  522. add %rax, $acc3
  523. mov $t1, %rax
  524. adc \$0, %rdx
  525. mov %rdx, $t0
  526. mulq 8*3($a_ptr)
  527. add $t0, $acc4
  528. adc \$0, %rdx
  529. add %rax, $acc4
  530. mov $acc1, %rax
  531. adc %rdx, $acc5
  532. adc \$0, $acc0
  533. ########################################################################
  534. # Second reduction step
  535. mov $acc1, $t1
  536. shl \$32, $acc1
  537. mulq $poly3
  538. shr \$32, $t1
  539. add $acc1, $acc2
  540. adc $t1, $acc3
  541. adc %rax, $acc4
  542. mov 8*2($b_ptr), %rax
  543. adc %rdx, $acc5
  544. adc \$0, $acc0
  545. xor $acc1, $acc1
  546. ########################################################################
  547. # Multiply by b[2]
  548. mov %rax, $t1
  549. mulq 8*0($a_ptr)
  550. add %rax, $acc2
  551. mov $t1, %rax
  552. adc \$0, %rdx
  553. mov %rdx, $t0
  554. mulq 8*1($a_ptr)
  555. add $t0, $acc3
  556. adc \$0, %rdx
  557. add %rax, $acc3
  558. mov $t1, %rax
  559. adc \$0, %rdx
  560. mov %rdx, $t0
  561. mulq 8*2($a_ptr)
  562. add $t0, $acc4
  563. adc \$0, %rdx
  564. add %rax, $acc4
  565. mov $t1, %rax
  566. adc \$0, %rdx
  567. mov %rdx, $t0
  568. mulq 8*3($a_ptr)
  569. add $t0, $acc5
  570. adc \$0, %rdx
  571. add %rax, $acc5
  572. mov $acc2, %rax
  573. adc %rdx, $acc0
  574. adc \$0, $acc1
  575. ########################################################################
  576. # Third reduction step
  577. mov $acc2, $t1
  578. shl \$32, $acc2
  579. mulq $poly3
  580. shr \$32, $t1
  581. add $acc2, $acc3
  582. adc $t1, $acc4
  583. adc %rax, $acc5
  584. mov 8*3($b_ptr), %rax
  585. adc %rdx, $acc0
  586. adc \$0, $acc1
  587. xor $acc2, $acc2
  588. ########################################################################
  589. # Multiply by b[3]
  590. mov %rax, $t1
  591. mulq 8*0($a_ptr)
  592. add %rax, $acc3
  593. mov $t1, %rax
  594. adc \$0, %rdx
  595. mov %rdx, $t0
  596. mulq 8*1($a_ptr)
  597. add $t0, $acc4
  598. adc \$0, %rdx
  599. add %rax, $acc4
  600. mov $t1, %rax
  601. adc \$0, %rdx
  602. mov %rdx, $t0
  603. mulq 8*2($a_ptr)
  604. add $t0, $acc5
  605. adc \$0, %rdx
  606. add %rax, $acc5
  607. mov $t1, %rax
  608. adc \$0, %rdx
  609. mov %rdx, $t0
  610. mulq 8*3($a_ptr)
  611. add $t0, $acc0
  612. adc \$0, %rdx
  613. add %rax, $acc0
  614. mov $acc3, %rax
  615. adc %rdx, $acc1
  616. adc \$0, $acc2
  617. ########################################################################
  618. # Final reduction step
  619. mov $acc3, $t1
  620. shl \$32, $acc3
  621. mulq $poly3
  622. shr \$32, $t1
  623. add $acc3, $acc4
  624. adc $t1, $acc5
  625. mov $acc4, $t0
  626. adc %rax, $acc0
  627. adc %rdx, $acc1
  628. mov $acc5, $t1
  629. adc \$0, $acc2
  630. ########################################################################
  631. # Branch-less conditional subtraction of P
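# p[0] = 2^64-1 (hence the -1 immediate) and p[2] = 0; the borrow out of the
# last sbb drives the cmovc selection between the subtracted and the
# original values.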
  632. sub \$-1, $acc4 # .Lpoly[0]
  633. mov $acc0, $t2
  634. sbb $poly1, $acc5 # .Lpoly[1]
  635. sbb \$0, $acc0 # .Lpoly[2]
  636. mov $acc1, $t3
  637. sbb $poly3, $acc1 # .Lpoly[3]
  638. sbb \$0, $acc2
  639. cmovc $t0, $acc4
  640. cmovc $t1, $acc5
  641. mov $acc4, 8*0($r_ptr)
  642. cmovc $t2, $acc0
  643. mov $acc5, 8*1($r_ptr)
  644. cmovc $t3, $acc1
  645. mov $acc0, 8*2($r_ptr)
  646. mov $acc1, 8*3($r_ptr)
  647. ret
  648. .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
  649. ################################################################################
  650. # void ecp_nistz256_sqr_mont(
  651. # uint64_t res[4],
  652. # uint64_t a[4]);
  653. # we optimize the square according to S.Gueron and V.Krasnov,
  654. # "Speeding up Big-Number Squaring"
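# The cross products a[i]*a[j] (i<j) are computed once and doubled, then the
# diagonal squares a[i]^2 are added in, roughly halving the number of
# multiplications compared to a general 4x4 multiply.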
  655. .globl ecp_nistz256_sqr_mont
  656. .type ecp_nistz256_sqr_mont,\@function,2
  657. .align 32
  658. ecp_nistz256_sqr_mont:
  659. ___
  660. $code.=<<___ if ($addx);
  661. mov \$0x80100, %ecx
  662. and OPENSSL_ia32cap_P+8(%rip), %ecx
  663. ___
  664. $code.=<<___;
  665. push %rbp
  666. push %rbx
  667. push %r12
  668. push %r13
  669. push %r14
  670. push %r15
  671. ___
  672. $code.=<<___ if ($addx);
  673. cmp \$0x80100, %ecx
  674. je .Lsqr_montx
  675. ___
  676. $code.=<<___;
  677. mov 8*0($a_ptr), %rax
  678. mov 8*1($a_ptr), $acc6
  679. mov 8*2($a_ptr), $acc7
  680. mov 8*3($a_ptr), $acc0
  681. call __ecp_nistz256_sqr_montq
  682. ___
  683. $code.=<<___ if ($addx);
  684. jmp .Lsqr_mont_done
  685. .align 32
  686. .Lsqr_montx:
  687. mov 8*0($a_ptr), %rdx
  688. mov 8*1($a_ptr), $acc6
  689. mov 8*2($a_ptr), $acc7
  690. mov 8*3($a_ptr), $acc0
  691. lea -128($a_ptr), $a_ptr # control u-op density
  692. call __ecp_nistz256_sqr_montx
  693. ___
  694. $code.=<<___;
  695. .Lsqr_mont_done:
  696. pop %r15
  697. pop %r14
  698. pop %r13
  699. pop %r12
  700. pop %rbx
  701. pop %rbp
  702. ret
  703. .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
  704. .type __ecp_nistz256_sqr_montq,\@abi-omnipotent
  705. .align 32
  706. __ecp_nistz256_sqr_montq:
  707. mov %rax, $acc5
  708. mulq $acc6 # a[1]*a[0]
  709. mov %rax, $acc1
  710. mov $acc7, %rax
  711. mov %rdx, $acc2
  712. mulq $acc5 # a[0]*a[2]
  713. add %rax, $acc2
  714. mov $acc0, %rax
  715. adc \$0, %rdx
  716. mov %rdx, $acc3
  717. mulq $acc5 # a[0]*a[3]
  718. add %rax, $acc3
  719. mov $acc7, %rax
  720. adc \$0, %rdx
  721. mov %rdx, $acc4
  722. #################################
  723. mulq $acc6 # a[1]*a[2]
  724. add %rax, $acc3
  725. mov $acc0, %rax
  726. adc \$0, %rdx
  727. mov %rdx, $t1
  728. mulq $acc6 # a[1]*a[3]
  729. add %rax, $acc4
  730. mov $acc0, %rax
  731. adc \$0, %rdx
  732. add $t1, $acc4
  733. mov %rdx, $acc5
  734. adc \$0, $acc5
  735. #################################
  736. mulq $acc7 # a[2]*a[3]
  737. xor $acc7, $acc7
  738. add %rax, $acc5
  739. mov 8*0($a_ptr), %rax
  740. mov %rdx, $acc6
  741. adc \$0, $acc6
  742. add $acc1, $acc1 # acc1:6<<1
  743. adc $acc2, $acc2
  744. adc $acc3, $acc3
  745. adc $acc4, $acc4
  746. adc $acc5, $acc5
  747. adc $acc6, $acc6
  748. adc \$0, $acc7
  749. mulq %rax
  750. mov %rax, $acc0
  751. mov 8*1($a_ptr), %rax
  752. mov %rdx, $t0
  753. mulq %rax
  754. add $t0, $acc1
  755. adc %rax, $acc2
  756. mov 8*2($a_ptr), %rax
  757. adc \$0, %rdx
  758. mov %rdx, $t0
  759. mulq %rax
  760. add $t0, $acc3
  761. adc %rax, $acc4
  762. mov 8*3($a_ptr), %rax
  763. adc \$0, %rdx
  764. mov %rdx, $t0
  765. mulq %rax
  766. add $t0, $acc5
  767. adc %rax, $acc6
  768. mov $acc0, %rax
  769. adc %rdx, $acc7
  770. mov .Lpoly+8*1(%rip), $a_ptr
  771. mov .Lpoly+8*3(%rip), $t1
  772. ##########################################
  773. # Now the reduction
  774. # First iteration
  775. mov $acc0, $t0
  776. shl \$32, $acc0
  777. mulq $t1
  778. shr \$32, $t0
  779. add $acc0, $acc1 # +=acc[0]<<96
  780. adc $t0, $acc2
  781. adc %rax, $acc3
  782. mov $acc1, %rax
  783. adc \$0, %rdx
  784. ##########################################
  785. # Second iteration
  786. mov $acc1, $t0
  787. shl \$32, $acc1
  788. mov %rdx, $acc0
  789. mulq $t1
  790. shr \$32, $t0
  791. add $acc1, $acc2
  792. adc $t0, $acc3
  793. adc %rax, $acc0
  794. mov $acc2, %rax
  795. adc \$0, %rdx
  796. ##########################################
  797. # Third iteration
  798. mov $acc2, $t0
  799. shl \$32, $acc2
  800. mov %rdx, $acc1
  801. mulq $t1
  802. shr \$32, $t0
  803. add $acc2, $acc3
  804. adc $t0, $acc0
  805. adc %rax, $acc1
  806. mov $acc3, %rax
  807. adc \$0, %rdx
  808. ###########################################
  809. # Last iteration
  810. mov $acc3, $t0
  811. shl \$32, $acc3
  812. mov %rdx, $acc2
  813. mulq $t1
  814. shr \$32, $t0
  815. add $acc3, $acc0
  816. adc $t0, $acc1
  817. adc %rax, $acc2
  818. adc \$0, %rdx
  819. xor $acc3, $acc3
  820. ############################################
  821. # Add the rest of the acc
  822. add $acc0, $acc4
  823. adc $acc1, $acc5
  824. mov $acc4, $acc0
  825. adc $acc2, $acc6
  826. adc %rdx, $acc7
  827. mov $acc5, $acc1
  828. adc \$0, $acc3
  829. sub \$-1, $acc4 # .Lpoly[0]
  830. mov $acc6, $acc2
  831. sbb $a_ptr, $acc5 # .Lpoly[1]
  832. sbb \$0, $acc6 # .Lpoly[2]
  833. mov $acc7, $t0
  834. sbb $t1, $acc7 # .Lpoly[3]
  835. sbb \$0, $acc3
  836. cmovc $acc0, $acc4
  837. cmovc $acc1, $acc5
  838. mov $acc4, 8*0($r_ptr)
  839. cmovc $acc2, $acc6
  840. mov $acc5, 8*1($r_ptr)
  841. cmovc $t0, $acc7
  842. mov $acc6, 8*2($r_ptr)
  843. mov $acc7, 8*3($r_ptr)
  844. ret
  845. .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
  846. ___
  847. if ($addx) {
  848. $code.=<<___;
  849. .type __ecp_nistz256_mul_montx,\@abi-omnipotent
  850. .align 32
  851. __ecp_nistz256_mul_montx:
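# Same algorithm as __ecp_nistz256_mul_montq, but MULX returns both halves of
# a product without touching the flags, and ADCX/ADOX maintain two
# independent carry chains, so the partial products can be accumulated with
# fewer dependencies.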
  852. ########################################################################
  853. # Multiply by b[0]
  854. mulx $acc1, $acc0, $acc1
  855. mulx $acc2, $t0, $acc2
  856. mov \$32, $poly1
  857. xor $acc5, $acc5 # cf=0
  858. mulx $acc3, $t1, $acc3
  859. mov .Lpoly+8*3(%rip), $poly3
  860. adc $t0, $acc1
  861. mulx $acc4, $t0, $acc4
  862. mov $acc0, %rdx
  863. adc $t1, $acc2
  864. shlx $poly1,$acc0,$t1
  865. adc $t0, $acc3
  866. shrx $poly1,$acc0,$t0
  867. adc \$0, $acc4
  868. ########################################################################
  869. # First reduction step
  870. add $t1, $acc1
  871. adc $t0, $acc2
  872. mulx $poly3, $t0, $t1
  873. mov 8*1($b_ptr), %rdx
  874. adc $t0, $acc3
  875. adc $t1, $acc4
  876. adc \$0, $acc5
  877. xor $acc0, $acc0 # $acc0=0,cf=0,of=0
  878. ########################################################################
  879. # Multiply by b[1]
  880. mulx 8*0+128($a_ptr), $t0, $t1
  881. adcx $t0, $acc1
  882. adox $t1, $acc2
  883. mulx 8*1+128($a_ptr), $t0, $t1
  884. adcx $t0, $acc2
  885. adox $t1, $acc3
  886. mulx 8*2+128($a_ptr), $t0, $t1
  887. adcx $t0, $acc3
  888. adox $t1, $acc4
  889. mulx 8*3+128($a_ptr), $t0, $t1
  890. mov $acc1, %rdx
  891. adcx $t0, $acc4
  892. shlx $poly1, $acc1, $t0
  893. adox $t1, $acc5
  894. shrx $poly1, $acc1, $t1
  895. adcx $acc0, $acc5
  896. adox $acc0, $acc0
  897. adc \$0, $acc0
  898. ########################################################################
  899. # Second reduction step
  900. add $t0, $acc2
  901. adc $t1, $acc3
  902. mulx $poly3, $t0, $t1
  903. mov 8*2($b_ptr), %rdx
  904. adc $t0, $acc4
  905. adc $t1, $acc5
  906. adc \$0, $acc0
  907. xor $acc1 ,$acc1 # $acc1=0,cf=0,of=0
  908. ########################################################################
  909. # Multiply by b[2]
  910. mulx 8*0+128($a_ptr), $t0, $t1
  911. adcx $t0, $acc2
  912. adox $t1, $acc3
  913. mulx 8*1+128($a_ptr), $t0, $t1
  914. adcx $t0, $acc3
  915. adox $t1, $acc4
  916. mulx 8*2+128($a_ptr), $t0, $t1
  917. adcx $t0, $acc4
  918. adox $t1, $acc5
  919. mulx 8*3+128($a_ptr), $t0, $t1
  920. mov $acc2, %rdx
  921. adcx $t0, $acc5
  922. shlx $poly1, $acc2, $t0
  923. adox $t1, $acc0
  924. shrx $poly1, $acc2, $t1
  925. adcx $acc1, $acc0
  926. adox $acc1, $acc1
  927. adc \$0, $acc1
  928. ########################################################################
  929. # Third reduction step
  930. add $t0, $acc3
  931. adc $t1, $acc4
  932. mulx $poly3, $t0, $t1
  933. mov 8*3($b_ptr), %rdx
  934. adc $t0, $acc5
  935. adc $t1, $acc0
  936. adc \$0, $acc1
  937. xor $acc2, $acc2 # $acc2=0,cf=0,of=0
  938. ########################################################################
  939. # Multiply by b[3]
  940. mulx 8*0+128($a_ptr), $t0, $t1
  941. adcx $t0, $acc3
  942. adox $t1, $acc4
  943. mulx 8*1+128($a_ptr), $t0, $t1
  944. adcx $t0, $acc4
  945. adox $t1, $acc5
  946. mulx 8*2+128($a_ptr), $t0, $t1
  947. adcx $t0, $acc5
  948. adox $t1, $acc0
  949. mulx 8*3+128($a_ptr), $t0, $t1
  950. mov $acc3, %rdx
  951. adcx $t0, $acc0
  952. shlx $poly1, $acc3, $t0
  953. adox $t1, $acc1
  954. shrx $poly1, $acc3, $t1
  955. adcx $acc2, $acc1
  956. adox $acc2, $acc2
  957. adc \$0, $acc2
  958. ########################################################################
  959. # Fourth reduction step
  960. add $t0, $acc4
  961. adc $t1, $acc5
  962. mulx $poly3, $t0, $t1
  963. mov $acc4, $t2
  964. mov .Lpoly+8*1(%rip), $poly1
  965. adc $t0, $acc0
  966. mov $acc5, $t3
  967. adc $t1, $acc1
  968. adc \$0, $acc2
  969. ########################################################################
  970. # Branch-less conditional subtraction of P
  971. xor %eax, %eax
  972. mov $acc0, $t0
  973. sbb \$-1, $acc4 # .Lpoly[0]
  974. sbb $poly1, $acc5 # .Lpoly[1]
  975. sbb \$0, $acc0 # .Lpoly[2]
  976. mov $acc1, $t1
  977. sbb $poly3, $acc1 # .Lpoly[3]
  978. sbb \$0, $acc2
  979. cmovc $t2, $acc4
  980. cmovc $t3, $acc5
  981. mov $acc4, 8*0($r_ptr)
  982. cmovc $t0, $acc0
  983. mov $acc5, 8*1($r_ptr)
  984. cmovc $t1, $acc1
  985. mov $acc0, 8*2($r_ptr)
  986. mov $acc1, 8*3($r_ptr)
  987. ret
  988. .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
  989. .type __ecp_nistz256_sqr_montx,\@abi-omnipotent
  990. .align 32
  991. __ecp_nistz256_sqr_montx:
  992. mulx $acc6, $acc1, $acc2 # a[0]*a[1]
  993. mulx $acc7, $t0, $acc3 # a[0]*a[2]
  994. xor %eax, %eax
  995. adc $t0, $acc2
  996. mulx $acc0, $t1, $acc4 # a[0]*a[3]
  997. mov $acc6, %rdx
  998. adc $t1, $acc3
  999. adc \$0, $acc4
  1000. xor $acc5, $acc5 # $acc5=0,cf=0,of=0
  1001. #################################
  1002. mulx $acc7, $t0, $t1 # a[1]*a[2]
  1003. adcx $t0, $acc3
  1004. adox $t1, $acc4
  1005. mulx $acc0, $t0, $t1 # a[1]*a[3]
  1006. mov $acc7, %rdx
  1007. adcx $t0, $acc4
  1008. adox $t1, $acc5
  1009. adc \$0, $acc5
  1010. #################################
  1011. mulx $acc0, $t0, $acc6 # a[2]*a[3]
  1012. mov 8*0+128($a_ptr), %rdx
  1013. xor $acc7, $acc7 # $acc7=0,cf=0,of=0
  1014. adcx $acc1, $acc1 # acc1:6<<1
  1015. adox $t0, $acc5
  1016. adcx $acc2, $acc2
  1017. adox $acc7, $acc6 # of=0
  1018. mulx %rdx, $acc0, $t1
  1019. mov 8*1+128($a_ptr), %rdx
  1020. adcx $acc3, $acc3
  1021. adox $t1, $acc1
  1022. adcx $acc4, $acc4
  1023. mulx %rdx, $t0, $t4
  1024. mov 8*2+128($a_ptr), %rdx
  1025. adcx $acc5, $acc5
  1026. adox $t0, $acc2
  1027. adcx $acc6, $acc6
  1028. .byte 0x67
  1029. mulx %rdx, $t0, $t1
  1030. mov 8*3+128($a_ptr), %rdx
  1031. adox $t4, $acc3
  1032. adcx $acc7, $acc7
  1033. adox $t0, $acc4
  1034. mov \$32, $a_ptr
  1035. adox $t1, $acc5
  1036. .byte 0x67,0x67
  1037. mulx %rdx, $t0, $t4
  1038. mov $acc0, %rdx
  1039. adox $t0, $acc6
  1040. shlx $a_ptr, $acc0, $t0
  1041. adox $t4, $acc7
  1042. shrx $a_ptr, $acc0, $t4
  1043. mov .Lpoly+8*3(%rip), $t1
  1044. # reduction step 1
  1045. add $t0, $acc1
  1046. adc $t4, $acc2
  1047. mulx $t1, $t0, $acc0
  1048. mov $acc1, %rdx
  1049. adc $t0, $acc3
  1050. shlx $a_ptr, $acc1, $t0
  1051. adc \$0, $acc0
  1052. shrx $a_ptr, $acc1, $t4
  1053. # reduction step 2
  1054. add $t0, $acc2
  1055. adc $t4, $acc3
  1056. mulx $t1, $t0, $acc1
  1057. mov $acc2, %rdx
  1058. adc $t0, $acc0
  1059. shlx $a_ptr, $acc2, $t0
  1060. adc \$0, $acc1
  1061. shrx $a_ptr, $acc2, $t4
  1062. # reduction step 3
  1063. add $t0, $acc3
  1064. adc $t4, $acc0
  1065. mulx $t1, $t0, $acc2
  1066. mov $acc3, %rdx
  1067. adc $t0, $acc1
  1068. shlx $a_ptr, $acc3, $t0
  1069. adc \$0, $acc2
  1070. shrx $a_ptr, $acc3, $t4
  1071. # reduction step 4
  1072. add $t0, $acc0
  1073. adc $t4, $acc1
  1074. mulx $t1, $t0, $acc3
  1075. adc $t0, $acc2
  1076. adc \$0, $acc3
  1077. xor $t3, $t3 # cf=0
  1078. adc $acc0, $acc4 # accumulate upper half
  1079. mov .Lpoly+8*1(%rip), $a_ptr
  1080. adc $acc1, $acc5
  1081. mov $acc4, $acc0
  1082. adc $acc2, $acc6
  1083. adc $acc3, $acc7
  1084. mov $acc5, $acc1
  1085. adc \$0, $t3
  1086. xor %eax, %eax # cf=0
  1087. sbb \$-1, $acc4 # .Lpoly[0]
  1088. mov $acc6, $acc2
  1089. sbb $a_ptr, $acc5 # .Lpoly[1]
  1090. sbb \$0, $acc6 # .Lpoly[2]
  1091. mov $acc7, $acc3
  1092. sbb $t1, $acc7 # .Lpoly[3]
  1093. sbb \$0, $t3
  1094. cmovc $acc0, $acc4
  1095. cmovc $acc1, $acc5
  1096. mov $acc4, 8*0($r_ptr)
  1097. cmovc $acc2, $acc6
  1098. mov $acc5, 8*1($r_ptr)
  1099. cmovc $acc3, $acc7
  1100. mov $acc6, 8*2($r_ptr)
  1101. mov $acc7, 8*3($r_ptr)
  1102. ret
  1103. .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
  1104. ___
  1105. }
  1106. }
  1107. {
  1108. my ($r_ptr,$in_ptr)=("%rdi","%rsi");
  1109. my ($acc0,$acc1,$acc2,$acc3)=map("%r$_",(8..11));
  1110. my ($t0,$t1,$t2)=("%rcx","%r12","%r13");
  1111. $code.=<<___;
  1112. ################################################################################
  1113. # void ecp_nistz256_from_mont(
  1114. # uint64_t res[4],
  1115. # uint64_t in[4]);
  1116. # This one performs Montgomery multiplication by 1, so we only need the reduction
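# i.e. it computes in*2^-256 mod p, converting a field element out of the
# Montgomery domain.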
  1117. .globl ecp_nistz256_from_mont
  1118. .type ecp_nistz256_from_mont,\@function,2
  1119. .align 32
  1120. ecp_nistz256_from_mont:
  1121. push %r12
  1122. push %r13
  1123. mov 8*0($in_ptr), %rax
  1124. mov .Lpoly+8*3(%rip), $t2
  1125. mov 8*1($in_ptr), $acc1
  1126. mov 8*2($in_ptr), $acc2
  1127. mov 8*3($in_ptr), $acc3
  1128. mov %rax, $acc0
  1129. mov .Lpoly+8*1(%rip), $t1
  1130. #########################################
  1131. # First iteration
  1132. mov %rax, $t0
  1133. shl \$32, $acc0
  1134. mulq $t2
  1135. shr \$32, $t0
  1136. add $acc0, $acc1
  1137. adc $t0, $acc2
  1138. adc %rax, $acc3
  1139. mov $acc1, %rax
  1140. adc \$0, %rdx
  1141. #########################################
  1142. # Second iteration
  1143. mov $acc1, $t0
  1144. shl \$32, $acc1
  1145. mov %rdx, $acc0
  1146. mulq $t2
  1147. shr \$32, $t0
  1148. add $acc1, $acc2
  1149. adc $t0, $acc3
  1150. adc %rax, $acc0
  1151. mov $acc2, %rax
  1152. adc \$0, %rdx
  1153. ##########################################
  1154. # Third iteration
  1155. mov $acc2, $t0
  1156. shl \$32, $acc2
  1157. mov %rdx, $acc1
  1158. mulq $t2
  1159. shr \$32, $t0
  1160. add $acc2, $acc3
  1161. adc $t0, $acc0
  1162. adc %rax, $acc1
  1163. mov $acc3, %rax
  1164. adc \$0, %rdx
  1165. ###########################################
  1166. # Last iteration
  1167. mov $acc3, $t0
  1168. shl \$32, $acc3
  1169. mov %rdx, $acc2
  1170. mulq $t2
  1171. shr \$32, $t0
  1172. add $acc3, $acc0
  1173. adc $t0, $acc1
  1174. mov $acc0, $t0
  1175. adc %rax, $acc2
  1176. mov $acc1, $in_ptr
  1177. adc \$0, %rdx
  1178. ###########################################
  1179. # Branch-less conditional subtraction
  1180. sub \$-1, $acc0
  1181. mov $acc2, %rax
  1182. sbb $t1, $acc1
  1183. sbb \$0, $acc2
  1184. mov %rdx, $acc3
  1185. sbb $t2, %rdx
  1186. sbb $t2, $t2
  1187. cmovnz $t0, $acc0
  1188. cmovnz $in_ptr, $acc1
  1189. mov $acc0, 8*0($r_ptr)
  1190. cmovnz %rax, $acc2
  1191. mov $acc1, 8*1($r_ptr)
  1192. cmovz %rdx, $acc3
  1193. mov $acc2, 8*2($r_ptr)
  1194. mov $acc3, 8*3($r_ptr)
  1195. pop %r13
  1196. pop %r12
  1197. ret
  1198. .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
  1199. ___
  1200. }
  1201. {
  1202. my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
  1203. my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
  1204. my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
  1205. my ($M1,$T2a,$T2b,$TMP2,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
  1206. $code.=<<___;
  1207. ################################################################################
  1208. # void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
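# Each table entry is 96 bytes (three 256-bit words); the index is 1-based,
# so the store goes to val + (index-1)*96, computed as (3*index-3)<<5 below.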
  1209. .globl ecp_nistz256_scatter_w5
  1210. .type ecp_nistz256_scatter_w5,\@abi-omnipotent
  1211. .align 32
  1212. ecp_nistz256_scatter_w5:
  1213. lea -3($index,$index,2), $index
  1214. movdqa 0x00($in_t), %xmm0
  1215. shl \$5, $index
  1216. movdqa 0x10($in_t), %xmm1
  1217. movdqa 0x20($in_t), %xmm2
  1218. movdqa 0x30($in_t), %xmm3
  1219. movdqa 0x40($in_t), %xmm4
  1220. movdqa 0x50($in_t), %xmm5
  1221. movdqa %xmm0, 0x00($val,$index)
  1222. movdqa %xmm1, 0x10($val,$index)
  1223. movdqa %xmm2, 0x20($val,$index)
  1224. movdqa %xmm3, 0x30($val,$index)
  1225. movdqa %xmm4, 0x40($val,$index)
  1226. movdqa %xmm5, 0x50($val,$index)
  1227. ret
  1228. .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
  1229. ################################################################################
  1230. # void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
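# All 16 table entries are read on every call: pcmpeqd builds an all-ones
# mask only for the entry whose position matches the requested index, and
# pand/por accumulate just that entry, so the memory access pattern does not
# depend on the index.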
  1231. .globl ecp_nistz256_gather_w5
  1232. .type ecp_nistz256_gather_w5,\@abi-omnipotent
  1233. .align 32
  1234. ecp_nistz256_gather_w5:
  1235. ___
  1236. $code.=<<___ if ($avx>1);
  1237. mov OPENSSL_ia32cap_P+8(%rip), %eax
  1238. test \$`1<<5`, %eax
  1239. jnz .Lavx2_gather_w5
  1240. ___
  1241. $code.=<<___ if ($win64);
  1242. lea -0x88(%rsp), %rax
  1243. .LSEH_begin_ecp_nistz256_gather_w5:
  1244. .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
  1245. .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
  1246. .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
  1247. .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
  1248. .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
  1249. .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
  1250. .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
  1251. .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
  1252. .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
  1253. .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
  1254. .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
  1255. ___
  1256. $code.=<<___;
  1257. movdqa .LOne(%rip), $ONE
  1258. movd $index, $INDEX
  1259. pxor $Ra, $Ra
  1260. pxor $Rb, $Rb
  1261. pxor $Rc, $Rc
  1262. pxor $Rd, $Rd
  1263. pxor $Re, $Re
  1264. pxor $Rf, $Rf
  1265. movdqa $ONE, $M0
  1266. pshufd \$0, $INDEX, $INDEX
  1267. mov \$16, %rax
  1268. .Lselect_loop_sse_w5:
  1269. movdqa $M0, $TMP0
  1270. paddd $ONE, $M0
  1271. pcmpeqd $INDEX, $TMP0
  1272. movdqa 16*0($in_t), $T0a
  1273. movdqa 16*1($in_t), $T0b
  1274. movdqa 16*2($in_t), $T0c
  1275. movdqa 16*3($in_t), $T0d
  1276. movdqa 16*4($in_t), $T0e
  1277. movdqa 16*5($in_t), $T0f
  1278. lea 16*6($in_t), $in_t
  1279. pand $TMP0, $T0a
  1280. pand $TMP0, $T0b
  1281. por $T0a, $Ra
  1282. pand $TMP0, $T0c
  1283. por $T0b, $Rb
  1284. pand $TMP0, $T0d
  1285. por $T0c, $Rc
  1286. pand $TMP0, $T0e
  1287. por $T0d, $Rd
  1288. pand $TMP0, $T0f
  1289. por $T0e, $Re
  1290. por $T0f, $Rf
  1291. dec %rax
  1292. jnz .Lselect_loop_sse_w5
  1293. movdqu $Ra, 16*0($val)
  1294. movdqu $Rb, 16*1($val)
  1295. movdqu $Rc, 16*2($val)
  1296. movdqu $Rd, 16*3($val)
  1297. movdqu $Re, 16*4($val)
  1298. movdqu $Rf, 16*5($val)
  1299. ___
  1300. $code.=<<___ if ($win64);
  1301. movaps (%rsp), %xmm6
  1302. movaps 0x10(%rsp), %xmm7
  1303. movaps 0x20(%rsp), %xmm8
  1304. movaps 0x30(%rsp), %xmm9
  1305. movaps 0x40(%rsp), %xmm10
  1306. movaps 0x50(%rsp), %xmm11
  1307. movaps 0x60(%rsp), %xmm12
  1308. movaps 0x70(%rsp), %xmm13
  1309. movaps 0x80(%rsp), %xmm14
  1310. movaps 0x90(%rsp), %xmm15
  1311. lea 0xa8(%rsp), %rsp
  1312. .LSEH_end_ecp_nistz256_gather_w5:
  1313. ___
  1314. $code.=<<___;
  1315. ret
  1316. .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
  1317. ################################################################################
  1318. # void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
  1319. .globl ecp_nistz256_scatter_w7
  1320. .type ecp_nistz256_scatter_w7,\@abi-omnipotent
  1321. .align 32
  1322. ecp_nistz256_scatter_w7:
  1323. movdqu 0x00($in_t), %xmm0
  1324. shl \$6, $index
  1325. movdqu 0x10($in_t), %xmm1
  1326. movdqu 0x20($in_t), %xmm2
  1327. movdqu 0x30($in_t), %xmm3
  1328. movdqa %xmm0, 0x00($val,$index)
  1329. movdqa %xmm1, 0x10($val,$index)
  1330. movdqa %xmm2, 0x20($val,$index)
  1331. movdqa %xmm3, 0x30($val,$index)
  1332. ret
  1333. .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
  1334. ################################################################################
  1335. # void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
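# Same masked-select scan as the w5 gather, but over 64 entries of 64 bytes
# (two 256-bit words) each.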
  1336. .globl ecp_nistz256_gather_w7
  1337. .type ecp_nistz256_gather_w7,\@abi-omnipotent
  1338. .align 32
  1339. ecp_nistz256_gather_w7:
  1340. ___
  1341. $code.=<<___ if ($avx>1);
  1342. mov OPENSSL_ia32cap_P+8(%rip), %eax
  1343. test \$`1<<5`, %eax
  1344. jnz .Lavx2_gather_w7
  1345. ___
  1346. $code.=<<___ if ($win64);
  1347. lea -0x88(%rsp), %rax
  1348. .LSEH_begin_ecp_nistz256_gather_w7:
  1349. .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
  1350. .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
  1351. .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
  1352. .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
  1353. .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
  1354. .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
  1355. .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
  1356. .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
  1357. .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
  1358. .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
  1359. .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
  1360. ___
  1361. $code.=<<___;
  1362. movdqa .LOne(%rip), $M0
  1363. movd $index, $INDEX
  1364. pxor $Ra, $Ra
  1365. pxor $Rb, $Rb
  1366. pxor $Rc, $Rc
  1367. pxor $Rd, $Rd
  1368. movdqa $M0, $ONE
  1369. pshufd \$0, $INDEX, $INDEX
  1370. mov \$64, %rax
  1371. .Lselect_loop_sse_w7:
  1372. movdqa $M0, $TMP0
  1373. paddd $ONE, $M0
  1374. movdqa 16*0($in_t), $T0a
  1375. movdqa 16*1($in_t), $T0b
  1376. pcmpeqd $INDEX, $TMP0
  1377. movdqa 16*2($in_t), $T0c
  1378. movdqa 16*3($in_t), $T0d
  1379. lea 16*4($in_t), $in_t
  1380. pand $TMP0, $T0a
  1381. pand $TMP0, $T0b
  1382. por $T0a, $Ra
  1383. pand $TMP0, $T0c
  1384. por $T0b, $Rb
  1385. pand $TMP0, $T0d
  1386. por $T0c, $Rc
  1387. prefetcht0 255($in_t)
  1388. por $T0d, $Rd
  1389. dec %rax
  1390. jnz .Lselect_loop_sse_w7
  1391. movdqu $Ra, 16*0($val)
  1392. movdqu $Rb, 16*1($val)
  1393. movdqu $Rc, 16*2($val)
  1394. movdqu $Rd, 16*3($val)
  1395. ___
  1396. $code.=<<___ if ($win64);
  1397. movaps (%rsp), %xmm6
  1398. movaps 0x10(%rsp), %xmm7
  1399. movaps 0x20(%rsp), %xmm8
  1400. movaps 0x30(%rsp), %xmm9
  1401. movaps 0x40(%rsp), %xmm10
  1402. movaps 0x50(%rsp), %xmm11
  1403. movaps 0x60(%rsp), %xmm12
  1404. movaps 0x70(%rsp), %xmm13
  1405. movaps 0x80(%rsp), %xmm14
  1406. movaps 0x90(%rsp), %xmm15
  1407. lea 0xa8(%rsp), %rsp
  1408. .LSEH_end_ecp_nistz256_gather_w7:
  1409. ___
  1410. $code.=<<___;
  1411. ret
  1412. .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
  1413. ___
  1414. }
  1415. if ($avx>1) {
  1416. my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
  1417. my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
  1418. my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
  1419. my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));
  1420. $code.=<<___;
  1421. ################################################################################
  1422. # void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
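# AVX2 variant of the w5 gather: each iteration compares two candidate
# indices and accumulates two 96-byte entries, so 8 iterations cover the
# 16-entry table.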
  1423. .type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
  1424. .align 32
  1425. ecp_nistz256_avx2_gather_w5:
  1426. .Lavx2_gather_w5:
  1427. vzeroupper
  1428. ___
  1429. $code.=<<___ if ($win64);
  1430. lea -0x88(%rsp), %rax
  1431. .LSEH_begin_ecp_nistz256_avx2_gather_w5:
  1432. .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
  1433. .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
  1434. .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
  1435. .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 8(%rax)
  1436. .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
  1437. .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
  1438. .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
  1439. .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
  1440. .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
  1441. .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
  1442. .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
  1443. ___
  1444. $code.=<<___;
  1445. vmovdqa .LTwo(%rip), $TWO
  1446. vpxor $Ra, $Ra, $Ra
  1447. vpxor $Rb, $Rb, $Rb
  1448. vpxor $Rc, $Rc, $Rc
  1449. vmovdqa .LOne(%rip), $M0
  1450. vmovdqa .LTwo(%rip), $M1
  1451. vmovd $index, %xmm1
  1452. vpermd $INDEX, $Ra, $INDEX
  1453. mov \$8, %rax
  1454. .Lselect_loop_avx2_w5:
  1455. vmovdqa 32*0($in_t), $T0a
  1456. vmovdqa 32*1($in_t), $T0b
  1457. vmovdqa 32*2($in_t), $T0c
  1458. vmovdqa 32*3($in_t), $T1a
  1459. vmovdqa 32*4($in_t), $T1b
  1460. vmovdqa 32*5($in_t), $T1c
  1461. vpcmpeqd $INDEX, $M0, $TMP0
  1462. vpcmpeqd $INDEX, $M1, $TMP1
  1463. vpaddd $TWO, $M0, $M0
  1464. vpaddd $TWO, $M1, $M1
  1465. lea 32*6($in_t), $in_t
  1466. vpand $TMP0, $T0a, $T0a
  1467. vpand $TMP0, $T0b, $T0b
  1468. vpand $TMP0, $T0c, $T0c
  1469. vpand $TMP1, $T1a, $T1a
  1470. vpand $TMP1, $T1b, $T1b
  1471. vpand $TMP1, $T1c, $T1c
  1472. vpxor $T0a, $Ra, $Ra
  1473. vpxor $T0b, $Rb, $Rb
  1474. vpxor $T0c, $Rc, $Rc
  1475. vpxor $T1a, $Ra, $Ra
  1476. vpxor $T1b, $Rb, $Rb
  1477. vpxor $T1c, $Rc, $Rc
  1478. dec %rax
  1479. jnz .Lselect_loop_avx2_w5
  1480. vmovdqu $Ra, 32*0($val)
  1481. vmovdqu $Rb, 32*1($val)
  1482. vmovdqu $Rc, 32*2($val)
  1483. vzeroupper
  1484. ___
  1485. $code.=<<___ if ($win64);
  1486. movaps (%rsp), %xmm6
  1487. movaps 0x10(%rsp), %xmm7
  1488. movaps 0x20(%rsp), %xmm8
  1489. movaps 0x30(%rsp), %xmm9
  1490. movaps 0x40(%rsp), %xmm10
  1491. movaps 0x50(%rsp), %xmm11
  1492. movaps 0x60(%rsp), %xmm12
  1493. movaps 0x70(%rsp), %xmm13
  1494. movaps 0x80(%rsp), %xmm14
  1495. movaps 0x90(%rsp), %xmm15
  1496. lea 0xa8(%rsp), %rsp
  1497. .LSEH_end_ecp_nistz256_avx2_gather_w5:
  1498. ___
  1499. $code.=<<___;
  1500. ret
  1501. .size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
  1502. ___
  1503. }
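# The gather routines above all follow the same constant-time pattern: every
# table entry is read and masked, and only the entry whose (1-based) position
# matches the index survives, so the memory access pattern is independent of
# the secret index. A rough C model of ecp_nistz256_avx2_gather_w5 under that
# reading (illustrative only; the names and table layout here are assumptions,
# the real layout is defined by the callers in ecp_nistz256.c):
#
#	#include <stdint.h>
#	#include <string.h>
#
#	/* 16 Jacobian points of 3*4 limbs each; index 0 selects nothing,
#	 * which stands for the point at infinity. */
#	static void gather_w5_model(uint64_t val[12],
#	                            const uint64_t in_t[16][12], int index)
#	{
#		memset(val, 0, 12 * sizeof(uint64_t));
#		for (int i = 0; i < 16; i++) {
#			uint64_t mask = (uint64_t)0 - (uint64_t)(i + 1 == index);
#			for (int j = 0; j < 12; j++)
#				val[j] ^= in_t[i][j] & mask;	/* mirrors vpand+vpxor */
#		}
#	}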
  1504. if ($avx>1) {
  1505. my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
  1506. my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
  1507. my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
  1508. my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
  1509. my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));
  1510. $code.=<<___;
  1511. ################################################################################
  1512. # void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
  1513. .globl ecp_nistz256_avx2_gather_w7
  1514. .type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
  1515. .align 32
  1516. ecp_nistz256_avx2_gather_w7:
  1517. .Lavx2_gather_w7:
  1518. vzeroupper
  1519. ___
  1520. $code.=<<___ if ($win64);
  1521. lea -0x88(%rsp), %rax
  1522. .LSEH_begin_ecp_nistz256_avx2_gather_w7:
  1523. .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
  1524. .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
  1525. .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
1526. .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 0(%rax)
  1527. .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
  1528. .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
  1529. .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
  1530. .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
  1531. .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
  1532. .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
  1533. .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
  1534. ___
  1535. $code.=<<___;
  1536. vmovdqa .LThree(%rip), $THREE
  1537. vpxor $Ra, $Ra, $Ra
  1538. vpxor $Rb, $Rb, $Rb
  1539. vmovdqa .LOne(%rip), $M0
  1540. vmovdqa .LTwo(%rip), $M1
  1541. vmovdqa .LThree(%rip), $M2
  1542. vmovd $index, %xmm1
  1543. vpermd $INDEX, $Ra, $INDEX
  1544. # Skip index = 0, because it is implicitly the point at infinity
  1545. mov \$21, %rax
  1546. .Lselect_loop_avx2_w7:
  1547. vmovdqa 32*0($in_t), $T0a
  1548. vmovdqa 32*1($in_t), $T0b
  1549. vmovdqa 32*2($in_t), $T1a
  1550. vmovdqa 32*3($in_t), $T1b
  1551. vmovdqa 32*4($in_t), $T2a
  1552. vmovdqa 32*5($in_t), $T2b
  1553. vpcmpeqd $INDEX, $M0, $TMP0
  1554. vpcmpeqd $INDEX, $M1, $TMP1
  1555. vpcmpeqd $INDEX, $M2, $TMP2
  1556. vpaddd $THREE, $M0, $M0
  1557. vpaddd $THREE, $M1, $M1
  1558. vpaddd $THREE, $M2, $M2
  1559. lea 32*6($in_t), $in_t
  1560. vpand $TMP0, $T0a, $T0a
  1561. vpand $TMP0, $T0b, $T0b
  1562. vpand $TMP1, $T1a, $T1a
  1563. vpand $TMP1, $T1b, $T1b
  1564. vpand $TMP2, $T2a, $T2a
  1565. vpand $TMP2, $T2b, $T2b
  1566. vpxor $T0a, $Ra, $Ra
  1567. vpxor $T0b, $Rb, $Rb
  1568. vpxor $T1a, $Ra, $Ra
  1569. vpxor $T1b, $Rb, $Rb
  1570. vpxor $T2a, $Ra, $Ra
  1571. vpxor $T2b, $Rb, $Rb
  1572. dec %rax
  1573. jnz .Lselect_loop_avx2_w7
  1574. vmovdqa 32*0($in_t), $T0a
  1575. vmovdqa 32*1($in_t), $T0b
  1576. vpcmpeqd $INDEX, $M0, $TMP0
  1577. vpand $TMP0, $T0a, $T0a
  1578. vpand $TMP0, $T0b, $T0b
  1579. vpxor $T0a, $Ra, $Ra
  1580. vpxor $T0b, $Rb, $Rb
  1581. vmovdqu $Ra, 32*0($val)
  1582. vmovdqu $Rb, 32*1($val)
  1583. vzeroupper
  1584. ___
  1585. $code.=<<___ if ($win64);
  1586. movaps (%rsp), %xmm6
  1587. movaps 0x10(%rsp), %xmm7
  1588. movaps 0x20(%rsp), %xmm8
  1589. movaps 0x30(%rsp), %xmm9
  1590. movaps 0x40(%rsp), %xmm10
  1591. movaps 0x50(%rsp), %xmm11
  1592. movaps 0x60(%rsp), %xmm12
  1593. movaps 0x70(%rsp), %xmm13
  1594. movaps 0x80(%rsp), %xmm14
  1595. movaps 0x90(%rsp), %xmm15
  1596. lea 0xa8(%rsp), %rsp
  1597. .LSEH_end_ecp_nistz256_avx2_gather_w7:
  1598. ___
  1599. $code.=<<___;
  1600. ret
  1601. .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
  1602. ___
  1603. } else {
  1604. $code.=<<___;
  1605. .globl ecp_nistz256_avx2_gather_w7
  1606. .type ecp_nistz256_avx2_gather_w7,\@function,3
  1607. .align 32
  1608. ecp_nistz256_avx2_gather_w7:
  1609. .byte 0x0f,0x0b # ud2
  1610. ret
  1611. .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
  1612. ___
  1613. }
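# When the file is generated without AVX2 support ($avx<=1), the
# ecp_nistz256_avx2_gather_w7 symbol is still emitted, but only as a stub
# that traps (ud2) if it is ever reached; presumably the C side takes the
# AVX2 path only when the capability bits allow it.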
  1614. {{{
  1615. ########################################################################
1616. # This block implements the higher-level point_double, point_add and
1617. # point_add_affine routines. The key to performance here is to let the
1618. # out-of-order execution logic overlap computations from the next step
1619. # with tail processing from the current step. By using a tailored
1620. # calling sequence we minimize inter-step overhead and give the
1621. # processor a better shot at overlapping operations...
1622. #
1623. # You will notice that the input data is copied to the stack. The
1624. # trouble is that there are no registers to spare for holding the
1625. # original pointers, and reloading those pointers would create undesired
1626. # dependencies on the effective-address calculation paths. In other words,
1627. # the copying is done deliberately to favour the out-of-order execution logic.
  1628. # <appro@openssl.org>
  1629. my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
  1630. my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
  1631. my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
  1632. my ($poly1,$poly3)=($acc6,$acc7);
  1633. sub load_for_mul () {
  1634. my ($a,$b,$src0) = @_;
  1635. my $bias = $src0 eq "%rax" ? 0 : -128;
  1636. " mov $b, $src0
  1637. lea $b, $b_ptr
  1638. mov 8*0+$a, $acc1
  1639. mov 8*1+$a, $acc2
  1640. lea $bias+$a, $a_ptr
  1641. mov 8*2+$a, $acc3
  1642. mov 8*3+$a, $acc4"
  1643. }
  1644. sub load_for_sqr () {
  1645. my ($a,$src0) = @_;
  1646. my $bias = $src0 eq "%rax" ? 0 : -128;
  1647. " mov 8*0+$a, $src0
  1648. mov 8*1+$a, $acc6
  1649. lea $bias+$a, $a_ptr
  1650. mov 8*2+$a, $acc7
  1651. mov 8*3+$a, $acc0"
  1652. }
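# load_for_mul() and load_for_sqr() emit the register-priming sequence shared
# by the subroutine calls below: the four 64-bit limbs of operand $a are
# pulled into the accumulator registers, operand $b (for multiplication) is
# made reachable through $b_ptr/$src0, and $a_ptr is pointed at $a adjusted
# by $bias. The -128 bias is applied only on the mulx/adx path (when $src0 is
# %rdx) and appears to pair with the displacement convention the ...x
# subroutines use when they re-read the operand.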
  1653. {
  1654. ########################################################################
  1655. # operate in 4-5-0-1 "name space" that matches multiplication output
  1656. #
  1657. my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
  1658. $code.=<<___;
  1659. .type __ecp_nistz256_add_toq,\@abi-omnipotent
  1660. .align 32
  1661. __ecp_nistz256_add_toq:
  1662. add 8*0($b_ptr), $a0
  1663. adc 8*1($b_ptr), $a1
  1664. mov $a0, $t0
  1665. adc 8*2($b_ptr), $a2
  1666. adc 8*3($b_ptr), $a3
  1667. mov $a1, $t1
  1668. sbb $t4, $t4
  1669. sub \$-1, $a0
  1670. mov $a2, $t2
  1671. sbb $poly1, $a1
  1672. sbb \$0, $a2
  1673. mov $a3, $t3
  1674. sbb $poly3, $a3
  1675. test $t4, $t4
  1676. cmovz $t0, $a0
  1677. cmovz $t1, $a1
  1678. mov $a0, 8*0($r_ptr)
  1679. cmovz $t2, $a2
  1680. mov $a1, 8*1($r_ptr)
  1681. cmovz $t3, $a3
  1682. mov $a2, 8*2($r_ptr)
  1683. mov $a3, 8*3($r_ptr)
  1684. ret
  1685. .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
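# A rough C model of __ecp_nistz256_add_toq (illustrative only, not part of
# the build): limbs are stored least-significant first, .Lpoly holds the
# P-256 prime in the same layout, and p is subtracted back out only when the
# 256-bit addition carries out of the top limb, mirroring the sbb/cmovz
# sequence above.
#
#	#include <stdint.h>
#	#include <string.h>
#
#	/* p = 2^256 - 2^224 + 2^192 + 2^96 - 1 */
#	static const uint64_t P256[4] = {
#		0xffffffffffffffffULL,	/* the "sub \$-1" immediate */
#		0x00000000ffffffffULL,	/* kept live in poly1       */
#		0x0000000000000000ULL,	/* the "sbb \$0" step       */
#		0xffffffff00000001ULL	/* kept live in poly3       */
#	};
#
#	static void add_toq_model(uint64_t r[4], const uint64_t a[4],
#	                          const uint64_t b[4])
#	{
#		unsigned __int128 acc = 0, d;
#		uint64_t sum[4], dif[4], carry, borrow = 0;
#
#		for (int i = 0; i < 4; i++) {		/* sum = a + b */
#			acc += (unsigned __int128)a[i] + b[i];
#			sum[i] = (uint64_t)acc;
#			acc >>= 64;
#		}
#		carry = (uint64_t)acc;			/* "sbb t4, t4" */
#
#		for (int i = 0; i < 4; i++) {		/* dif = sum - p */
#			d = (unsigned __int128)sum[i] - P256[i] - borrow;
#			dif[i] = (uint64_t)d;
#			borrow = (uint64_t)(d >> 64) & 1;
#		}
#
#		memcpy(r, carry ? dif : sum, sizeof(sum));	/* cmovz chain */
#	}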
  1686. .type __ecp_nistz256_sub_fromq,\@abi-omnipotent
  1687. .align 32
  1688. __ecp_nistz256_sub_fromq:
  1689. sub 8*0($b_ptr), $a0
  1690. sbb 8*1($b_ptr), $a1
  1691. mov $a0, $t0
  1692. sbb 8*2($b_ptr), $a2
  1693. sbb 8*3($b_ptr), $a3
  1694. mov $a1, $t1
  1695. sbb $t4, $t4
  1696. add \$-1, $a0
  1697. mov $a2, $t2
  1698. adc $poly1, $a1
  1699. adc \$0, $a2
  1700. mov $a3, $t3
  1701. adc $poly3, $a3
  1702. test $t4, $t4
  1703. cmovz $t0, $a0
  1704. cmovz $t1, $a1
  1705. mov $a0, 8*0($r_ptr)
  1706. cmovz $t2, $a2
  1707. mov $a1, 8*1($r_ptr)
  1708. cmovz $t3, $a3
  1709. mov $a2, 8*2($r_ptr)
  1710. mov $a3, 8*3($r_ptr)
  1711. ret
  1712. .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
  1713. .type __ecp_nistz256_subq,\@abi-omnipotent
  1714. .align 32
  1715. __ecp_nistz256_subq:
  1716. sub $a0, $t0
  1717. sbb $a1, $t1
  1718. mov $t0, $a0
  1719. sbb $a2, $t2
  1720. sbb $a3, $t3
  1721. mov $t1, $a1
  1722. sbb $t4, $t4
  1723. add \$-1, $t0
  1724. mov $t2, $a2
  1725. adc $poly1, $t1
  1726. adc \$0, $t2
  1727. mov $t3, $a3
  1728. adc $poly3, $t3
  1729. test $t4, $t4
  1730. cmovnz $t0, $a0
  1731. cmovnz $t1, $a1
  1732. cmovnz $t2, $a2
  1733. cmovnz $t3, $a3
  1734. ret
  1735. .size __ecp_nistz256_subq,.-__ecp_nistz256_subq
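# Unlike the two routines above, __ecp_nistz256_subq takes both operands in
# registers (t0..t3 minus a0..a3), adds p back when the subtraction borrows,
# and leaves the result in a0..a3 without touching memory; callers store it
# themselves (see the "save the result" comments further down).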
  1736. .type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
  1737. .align 32
  1738. __ecp_nistz256_mul_by_2q:
  1739. add $a0, $a0 # a0:a3+a0:a3
  1740. adc $a1, $a1
  1741. mov $a0, $t0
  1742. adc $a2, $a2
  1743. adc $a3, $a3
  1744. mov $a1, $t1
  1745. sbb $t4, $t4
  1746. sub \$-1, $a0
  1747. mov $a2, $t2
  1748. sbb $poly1, $a1
  1749. sbb \$0, $a2
  1750. mov $a3, $t3
  1751. sbb $poly3, $a3
  1752. test $t4, $t4
  1753. cmovz $t0, $a0
  1754. cmovz $t1, $a1
  1755. mov $a0, 8*0($r_ptr)
  1756. cmovz $t2, $a2
  1757. mov $a1, 8*1($r_ptr)
  1758. cmovz $t3, $a3
  1759. mov $a2, 8*2($r_ptr)
  1760. mov $a3, 8*3($r_ptr)
  1761. ret
  1762. .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
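# __ecp_nistz256_mul_by_2q is the doubling special case of the addition
# above: the operand already sitting in a0..a3 is added to itself and the
# same conditional subtraction of p is applied before the result is stored.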
  1763. ___
  1764. }
  1765. sub gen_double () {
  1766. my $x = shift;
  1767. my ($src0,$sfx,$bias);
  1768. my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
  1769. if ($x ne "x") {
  1770. $src0 = "%rax";
  1771. $sfx = "";
  1772. $bias = 0;
  1773. $code.=<<___;
  1774. .globl ecp_nistz256_point_double
  1775. .type ecp_nistz256_point_double,\@function,2
  1776. .align 32
  1777. ecp_nistz256_point_double:
  1778. ___
  1779. $code.=<<___ if ($addx);
  1780. mov \$0x80100, %ecx
  1781. and OPENSSL_ia32cap_P+8(%rip), %ecx
  1782. cmp \$0x80100, %ecx
  1783. je .Lpoint_doublex
  1784. ___
  1785. } else {
  1786. $src0 = "%rdx";
  1787. $sfx = "x";
  1788. $bias = 128;
  1789. $code.=<<___;
  1790. .type ecp_nistz256_point_doublex,\@function,2
  1791. .align 32
  1792. ecp_nistz256_point_doublex:
  1793. .Lpoint_doublex:
  1794. ___
  1795. }
  1796. $code.=<<___;
  1797. push %rbp
  1798. push %rbx
  1799. push %r12
  1800. push %r13
  1801. push %r14
  1802. push %r15
  1803. sub \$32*5+8, %rsp
  1804. movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
  1805. mov $a_ptr, $b_ptr # backup copy
  1806. movdqu 0x10($a_ptr), %xmm1
  1807. mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
  1808. mov 0x20+8*1($a_ptr), $acc5
  1809. mov 0x20+8*2($a_ptr), $acc0
  1810. mov 0x20+8*3($a_ptr), $acc1
  1811. mov .Lpoly+8*1(%rip), $poly1
  1812. mov .Lpoly+8*3(%rip), $poly3
  1813. movdqa %xmm0, $in_x(%rsp)
  1814. movdqa %xmm1, $in_x+0x10(%rsp)
  1815. lea 0x20($r_ptr), $acc2
  1816. lea 0x40($r_ptr), $acc3
  1817. movq $r_ptr, %xmm0
  1818. movq $acc2, %xmm1
  1819. movq $acc3, %xmm2
  1820. lea $S(%rsp), $r_ptr
  1821. call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(S, in_y);
  1822. mov 0x40+8*0($a_ptr), $src0
  1823. mov 0x40+8*1($a_ptr), $acc6
  1824. mov 0x40+8*2($a_ptr), $acc7
  1825. mov 0x40+8*3($a_ptr), $acc0
  1826. lea 0x40-$bias($a_ptr), $a_ptr
  1827. lea $Zsqr(%rsp), $r_ptr
  1828. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Zsqr, in_z);
  1829. `&load_for_sqr("$S(%rsp)", "$src0")`
  1830. lea $S(%rsp), $r_ptr
  1831. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(S, S);
  1832. mov 0x20($b_ptr), $src0 # $b_ptr is still valid
  1833. mov 0x40+8*0($b_ptr), $acc1
  1834. mov 0x40+8*1($b_ptr), $acc2
  1835. mov 0x40+8*2($b_ptr), $acc3
  1836. mov 0x40+8*3($b_ptr), $acc4
  1837. lea 0x40-$bias($b_ptr), $a_ptr
  1838. lea 0x20($b_ptr), $b_ptr
  1839. movq %xmm2, $r_ptr
  1840. call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, in_z, in_y);
  1841. call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(res_z, res_z);
  1842. mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
  1843. mov $in_x+8*1(%rsp), $acc5
  1844. lea $Zsqr(%rsp), $b_ptr
  1845. mov $in_x+8*2(%rsp), $acc0
  1846. mov $in_x+8*3(%rsp), $acc1
  1847. lea $M(%rsp), $r_ptr
  1848. call __ecp_nistz256_add_to$x # p256_add(M, in_x, Zsqr);
  1849. mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
  1850. mov $in_x+8*1(%rsp), $acc5
  1851. lea $Zsqr(%rsp), $b_ptr
  1852. mov $in_x+8*2(%rsp), $acc0
  1853. mov $in_x+8*3(%rsp), $acc1
  1854. lea $Zsqr(%rsp), $r_ptr
  1855. call __ecp_nistz256_sub_from$x # p256_sub(Zsqr, in_x, Zsqr);
  1856. `&load_for_sqr("$S(%rsp)", "$src0")`
  1857. movq %xmm1, $r_ptr
  1858. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
  1859. ___
  1860. {
  1861. ######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
  1862. # operate in 4-5-6-7 "name space" that matches squaring output
  1863. #
  1864. my ($poly1,$poly3)=($a_ptr,$t1);
  1865. my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);
  1866. $code.=<<___;
  1867. xor $t4, $t4
  1868. mov $a0, $t0
  1869. add \$-1, $a0
  1870. mov $a1, $t1
  1871. adc $poly1, $a1
  1872. mov $a2, $t2
  1873. adc \$0, $a2
  1874. mov $a3, $t3
  1875. adc $poly3, $a3
  1876. adc \$0, $t4
  1877. xor $a_ptr, $a_ptr # borrow $a_ptr
  1878. test \$1, $t0
  1879. cmovz $t0, $a0
  1880. cmovz $t1, $a1
  1881. cmovz $t2, $a2
  1882. cmovz $t3, $a3
  1883. cmovz $a_ptr, $t4
  1884. mov $a1, $t0 # a0:a3>>1
  1885. shr \$1, $a0
  1886. shl \$63, $t0
  1887. mov $a2, $t1
  1888. shr \$1, $a1
  1889. or $t0, $a0
  1890. shl \$63, $t1
  1891. mov $a3, $t2
  1892. shr \$1, $a2
  1893. or $t1, $a1
  1894. shl \$63, $t2
  1895. mov $a0, 8*0($r_ptr)
  1896. shr \$1, $a3
  1897. mov $a1, 8*1($r_ptr)
  1898. shl \$63, $t4
  1899. or $t2, $a2
  1900. or $t4, $a3
  1901. mov $a2, 8*2($r_ptr)
  1902. mov $a3, 8*3($r_ptr)
  1903. ___
  1904. }
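# A rough C model of the inlined division by 2 above (illustrative only):
# when the value is odd, p is added first so that the low bit becomes zero,
# and the resulting 257-bit quantity is shifted right by one. The P256[]
# limbs and headers are as in the sketch following __ecp_nistz256_add_toq.
#
#	static void div_by_2_model(uint64_t r[4], const uint64_t a[4])
#	{
#		unsigned __int128 acc = 0;
#		uint64_t t[4], top = 0;
#
#		if (a[0] & 1) {			/* odd: add p, keep bit 256 */
#			for (int i = 0; i < 4; i++) {
#				acc += (unsigned __int128)a[i] + P256[i];
#				t[i] = (uint64_t)acc;
#				acc >>= 64;
#			}
#			top = (uint64_t)acc;
#		} else {
#			memcpy(t, a, sizeof(t));
#		}
#
#		for (int i = 0; i < 3; i++)	/* shift right across limbs */
#			r[i] = (t[i] >> 1) | (t[i + 1] << 63);
#		r[3] = (t[3] >> 1) | (top << 63);
#	}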
  1905. $code.=<<___;
  1906. `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
  1907. lea $M(%rsp), $r_ptr
  1908. call __ecp_nistz256_mul_mont$x # p256_mul_mont(M, M, Zsqr);
  1909. lea $tmp0(%rsp), $r_ptr
1910. call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, M);
  1911. lea $M(%rsp), $b_ptr
  1912. lea $M(%rsp), $r_ptr
  1913. call __ecp_nistz256_add_to$x # p256_mul_by_3(M, M);
  1914. `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
  1915. lea $S(%rsp), $r_ptr
  1916. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, in_x);
  1917. lea $tmp0(%rsp), $r_ptr
  1918. call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, S);
  1919. `&load_for_sqr("$M(%rsp)", "$src0")`
  1920. movq %xmm0, $r_ptr
  1921. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_x, M);
  1922. lea $tmp0(%rsp), $b_ptr
  1923. mov $acc6, $acc0 # harmonize sqr output and sub input
  1924. mov $acc7, $acc1
  1925. mov $a_ptr, $poly1
  1926. mov $t1, $poly3
  1927. call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, tmp0);
  1928. mov $S+8*0(%rsp), $t0
  1929. mov $S+8*1(%rsp), $t1
  1930. mov $S+8*2(%rsp), $t2
  1931. mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
  1932. lea $S(%rsp), $r_ptr
  1933. call __ecp_nistz256_sub$x # p256_sub(S, S, res_x);
  1934. mov $M(%rsp), $src0
  1935. lea $M(%rsp), $b_ptr
  1936. mov $acc4, $acc6 # harmonize sub output and mul input
  1937. xor %ecx, %ecx
  1938. mov $acc4, $S+8*0(%rsp) # have to save:-(
  1939. mov $acc5, $acc2
  1940. mov $acc5, $S+8*1(%rsp)
  1941. cmovz $acc0, $acc3
  1942. mov $acc0, $S+8*2(%rsp)
  1943. lea $S-$bias(%rsp), $a_ptr
  1944. cmovz $acc1, $acc4
  1945. mov $acc1, $S+8*3(%rsp)
  1946. mov $acc6, $acc1
  1947. lea $S(%rsp), $r_ptr
  1948. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, M);
  1949. movq %xmm1, $b_ptr
  1950. movq %xmm1, $r_ptr
  1951. call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
  1952. add \$32*5+8, %rsp
  1953. pop %r15
  1954. pop %r14
  1955. pop %r13
  1956. pop %r12
  1957. pop %rbx
  1958. pop %rbp
  1959. ret
  1960. .size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
  1961. ___
  1962. }
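# For reference, the sequence of p256_* calls generated above amounts to the
# usual Jacobian doubling for an a = -3 curve:
#
#	Z3 = 2*Y1*Z1
#	M  = 3*(X1 + Z1^2)*(X1 - Z1^2)
#	S  = 4*X1*Y1^2
#	X3 = M^2 - 2*S
#	Y3 = M*(S - X3) - 8*Y1^4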
  1963. &gen_double("q");
  1964. sub gen_add () {
  1965. my $x = shift;
  1966. my ($src0,$sfx,$bias);
  1967. my ($H,$Hsqr,$R,$Rsqr,$Hcub,
  1968. $U1,$U2,$S1,$S2,
  1969. $res_x,$res_y,$res_z,
  1970. $in1_x,$in1_y,$in1_z,
  1971. $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
  1972. my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
  1973. if ($x ne "x") {
  1974. $src0 = "%rax";
  1975. $sfx = "";
  1976. $bias = 0;
  1977. $code.=<<___;
  1978. .globl ecp_nistz256_point_add
  1979. .type ecp_nistz256_point_add,\@function,3
  1980. .align 32
  1981. ecp_nistz256_point_add:
  1982. ___
  1983. $code.=<<___ if ($addx);
  1984. mov \$0x80100, %ecx
  1985. and OPENSSL_ia32cap_P+8(%rip), %ecx
  1986. cmp \$0x80100, %ecx
  1987. je .Lpoint_addx
  1988. ___
  1989. } else {
  1990. $src0 = "%rdx";
  1991. $sfx = "x";
  1992. $bias = 128;
  1993. $code.=<<___;
  1994. .type ecp_nistz256_point_addx,\@function,3
  1995. .align 32
  1996. ecp_nistz256_point_addx:
  1997. .Lpoint_addx:
  1998. ___
  1999. }
  2000. $code.=<<___;
  2001. push %rbp
  2002. push %rbx
  2003. push %r12
  2004. push %r13
  2005. push %r14
  2006. push %r15
  2007. sub \$32*18+8, %rsp
  2008. movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
  2009. movdqu 0x10($a_ptr), %xmm1
  2010. movdqu 0x20($a_ptr), %xmm2
  2011. movdqu 0x30($a_ptr), %xmm3
  2012. movdqu 0x40($a_ptr), %xmm4
  2013. movdqu 0x50($a_ptr), %xmm5
  2014. mov $a_ptr, $b_ptr # reassign
  2015. mov $b_org, $a_ptr # reassign
  2016. movdqa %xmm0, $in1_x(%rsp)
  2017. movdqa %xmm1, $in1_x+0x10(%rsp)
  2018. por %xmm0, %xmm1
  2019. movdqa %xmm2, $in1_y(%rsp)
  2020. movdqa %xmm3, $in1_y+0x10(%rsp)
  2021. por %xmm2, %xmm3
  2022. movdqa %xmm4, $in1_z(%rsp)
  2023. movdqa %xmm5, $in1_z+0x10(%rsp)
  2024. por %xmm1, %xmm3
2025. movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$b_org, the second point
  2026. pshufd \$0xb1, %xmm3, %xmm5
  2027. movdqu 0x10($a_ptr), %xmm1
  2028. movdqu 0x20($a_ptr), %xmm2
  2029. por %xmm3, %xmm5
  2030. movdqu 0x30($a_ptr), %xmm3
  2031. mov 0x40+8*0($a_ptr), $src0 # load original in2_z
  2032. mov 0x40+8*1($a_ptr), $acc6
  2033. mov 0x40+8*2($a_ptr), $acc7
  2034. mov 0x40+8*3($a_ptr), $acc0
  2035. movdqa %xmm0, $in2_x(%rsp)
  2036. pshufd \$0x1e, %xmm5, %xmm4
  2037. movdqa %xmm1, $in2_x+0x10(%rsp)
  2038. por %xmm0, %xmm1
  2039. movq $r_ptr, %xmm0 # save $r_ptr
  2040. movdqa %xmm2, $in2_y(%rsp)
  2041. movdqa %xmm3, $in2_y+0x10(%rsp)
  2042. por %xmm2, %xmm3
  2043. por %xmm4, %xmm5
  2044. pxor %xmm4, %xmm4
  2045. por %xmm1, %xmm3
  2046. lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
  2047. mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
  2048. mov $acc6, $in2_z+8*1(%rsp)
  2049. mov $acc7, $in2_z+8*2(%rsp)
  2050. mov $acc0, $in2_z+8*3(%rsp)
  2051. lea $Z2sqr(%rsp), $r_ptr # Z2^2
  2052. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z2sqr, in2_z);
  2053. pcmpeqd %xmm4, %xmm5
  2054. pshufd \$0xb1, %xmm3, %xmm4
  2055. por %xmm3, %xmm4
  2056. pshufd \$0, %xmm5, %xmm5 # in1infty
  2057. pshufd \$0x1e, %xmm4, %xmm3
  2058. por %xmm3, %xmm4
  2059. pxor %xmm3, %xmm3
  2060. pcmpeqd %xmm3, %xmm4
  2061. pshufd \$0, %xmm4, %xmm4 # in2infty
  2062. mov 0x40+8*0($b_ptr), $src0 # load original in1_z
  2063. mov 0x40+8*1($b_ptr), $acc6
  2064. mov 0x40+8*2($b_ptr), $acc7
  2065. mov 0x40+8*3($b_ptr), $acc0
  2066. lea 0x40-$bias($b_ptr), $a_ptr
  2067. lea $Z1sqr(%rsp), $r_ptr # Z1^2
  2068. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
  2069. `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
  2070. lea $S1(%rsp), $r_ptr # S1 = Z2^3
  2071. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, Z2sqr, in2_z);
  2072. `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
  2073. lea $S2(%rsp), $r_ptr # S2 = Z1^3
  2074. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
  2075. `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
  2076. lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
  2077. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, S1, in1_y);
  2078. `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
  2079. lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
  2080. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
  2081. lea $S1(%rsp), $b_ptr
  2082. lea $R(%rsp), $r_ptr # R = S2 - S1
  2083. call __ecp_nistz256_sub_from$x # p256_sub(R, S2, S1);
  2084. or $acc5, $acc4 # see if result is zero
  2085. movdqa %xmm4, %xmm2
  2086. or $acc0, $acc4
  2087. or $acc1, $acc4
  2088. por %xmm5, %xmm2 # in1infty || in2infty
  2089. movq $acc4, %xmm3
  2090. `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
  2091. lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
  2092. call __ecp_nistz256_mul_mont$x # p256_mul_mont(U1, in1_x, Z2sqr);
  2093. `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
  2094. lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
  2095. call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in2_x, Z1sqr);
  2096. lea $U1(%rsp), $b_ptr
  2097. lea $H(%rsp), $r_ptr # H = U2 - U1
  2098. call __ecp_nistz256_sub_from$x # p256_sub(H, U2, U1);
  2099. or $acc5, $acc4 # see if result is zero
  2100. or $acc0, $acc4
  2101. or $acc1, $acc4
  2102. .byte 0x3e # predict taken
  2103. jnz .Ladd_proceed$x # is_equal(U1,U2)?
  2104. movq %xmm2, $acc0
  2105. movq %xmm3, $acc1
  2106. test $acc0, $acc0
  2107. jnz .Ladd_proceed$x # (in1infty || in2infty)?
  2108. test $acc1, $acc1
  2109. jz .Ladd_proceed$x # is_equal(S1,S2)?
  2110. movq %xmm0, $r_ptr # restore $r_ptr
  2111. pxor %xmm0, %xmm0
  2112. movdqu %xmm0, 0x00($r_ptr)
  2113. movdqu %xmm0, 0x10($r_ptr)
  2114. movdqu %xmm0, 0x20($r_ptr)
  2115. movdqu %xmm0, 0x30($r_ptr)
  2116. movdqu %xmm0, 0x40($r_ptr)
  2117. movdqu %xmm0, 0x50($r_ptr)
  2118. jmp .Ladd_done$x
  2119. .align 32
  2120. .Ladd_proceed$x:
  2121. `&load_for_sqr("$R(%rsp)", "$src0")`
  2122. lea $Rsqr(%rsp), $r_ptr # R^2
  2123. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
  2124. `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
  2125. lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
  2126. call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
  2127. `&load_for_sqr("$H(%rsp)", "$src0")`
  2128. lea $Hsqr(%rsp), $r_ptr # H^2
  2129. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
  2130. `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
  2131. lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
  2132. call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, res_z, in2_z);
  2133. `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
  2134. lea $Hcub(%rsp), $r_ptr # H^3
  2135. call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
  2136. `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
  2137. lea $U2(%rsp), $r_ptr # U1*H^2
  2138. call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, U1, Hsqr);
  2139. ___
  2140. {
  2141. #######################################################################
  2142. # operate in 4-5-0-1 "name space" that matches multiplication output
  2143. #
  2144. my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
  2145. my ($poly1, $poly3)=($acc6,$acc7);
  2146. $code.=<<___;
  2147. #lea $U2(%rsp), $a_ptr
  2148. #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
  2149. #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
  2150. add $acc0, $acc0 # a0:a3+a0:a3
  2151. lea $Rsqr(%rsp), $a_ptr
  2152. adc $acc1, $acc1
  2153. mov $acc0, $t0
  2154. adc $acc2, $acc2
  2155. adc $acc3, $acc3
  2156. mov $acc1, $t1
  2157. sbb $t4, $t4
  2158. sub \$-1, $acc0
  2159. mov $acc2, $t2
  2160. sbb $poly1, $acc1
  2161. sbb \$0, $acc2
  2162. mov $acc3, $t3
  2163. sbb $poly3, $acc3
  2164. test $t4, $t4
  2165. cmovz $t0, $acc0
  2166. mov 8*0($a_ptr), $t0
  2167. cmovz $t1, $acc1
  2168. mov 8*1($a_ptr), $t1
  2169. cmovz $t2, $acc2
  2170. mov 8*2($a_ptr), $t2
  2171. cmovz $t3, $acc3
  2172. mov 8*3($a_ptr), $t3
  2173. call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
  2174. lea $Hcub(%rsp), $b_ptr
  2175. lea $res_x(%rsp), $r_ptr
  2176. call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
  2177. mov $U2+8*0(%rsp), $t0
  2178. mov $U2+8*1(%rsp), $t1
  2179. mov $U2+8*2(%rsp), $t2
  2180. mov $U2+8*3(%rsp), $t3
  2181. lea $res_y(%rsp), $r_ptr
  2182. call __ecp_nistz256_sub$x # p256_sub(res_y, U2, res_x);
  2183. mov $acc0, 8*0($r_ptr) # save the result, as
  2184. mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
  2185. mov $acc2, 8*2($r_ptr)
  2186. mov $acc3, 8*3($r_ptr)
  2187. ___
  2188. }
  2189. $code.=<<___;
  2190. `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
  2191. lea $S2(%rsp), $r_ptr
  2192. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S1, Hcub);
  2193. `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
  2194. lea $res_y(%rsp), $r_ptr
  2195. call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_y, R, res_y);
  2196. lea $S2(%rsp), $b_ptr
  2197. lea $res_y(%rsp), $r_ptr
  2198. call __ecp_nistz256_sub_from$x # p256_sub(res_y, res_y, S2);
  2199. movq %xmm0, $r_ptr # restore $r_ptr
  2200. movdqa %xmm5, %xmm0 # copy_conditional(res_z, in2_z, in1infty);
  2201. movdqa %xmm5, %xmm1
  2202. pandn $res_z(%rsp), %xmm0
  2203. movdqa %xmm5, %xmm2
  2204. pandn $res_z+0x10(%rsp), %xmm1
  2205. movdqa %xmm5, %xmm3
  2206. pand $in2_z(%rsp), %xmm2
  2207. pand $in2_z+0x10(%rsp), %xmm3
  2208. por %xmm0, %xmm2
  2209. por %xmm1, %xmm3
  2210. movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
  2211. movdqa %xmm4, %xmm1
  2212. pandn %xmm2, %xmm0
  2213. movdqa %xmm4, %xmm2
  2214. pandn %xmm3, %xmm1
  2215. movdqa %xmm4, %xmm3
  2216. pand $in1_z(%rsp), %xmm2
  2217. pand $in1_z+0x10(%rsp), %xmm3
  2218. por %xmm0, %xmm2
  2219. por %xmm1, %xmm3
  2220. movdqu %xmm2, 0x40($r_ptr)
  2221. movdqu %xmm3, 0x50($r_ptr)
  2222. movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
  2223. movdqa %xmm5, %xmm1
  2224. pandn $res_x(%rsp), %xmm0
  2225. movdqa %xmm5, %xmm2
  2226. pandn $res_x+0x10(%rsp), %xmm1
  2227. movdqa %xmm5, %xmm3
  2228. pand $in2_x(%rsp), %xmm2
  2229. pand $in2_x+0x10(%rsp), %xmm3
  2230. por %xmm0, %xmm2
  2231. por %xmm1, %xmm3
  2232. movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
  2233. movdqa %xmm4, %xmm1
  2234. pandn %xmm2, %xmm0
  2235. movdqa %xmm4, %xmm2
  2236. pandn %xmm3, %xmm1
  2237. movdqa %xmm4, %xmm3
  2238. pand $in1_x(%rsp), %xmm2
  2239. pand $in1_x+0x10(%rsp), %xmm3
  2240. por %xmm0, %xmm2
  2241. por %xmm1, %xmm3
  2242. movdqu %xmm2, 0x00($r_ptr)
  2243. movdqu %xmm3, 0x10($r_ptr)
  2244. movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
  2245. movdqa %xmm5, %xmm1
  2246. pandn $res_y(%rsp), %xmm0
  2247. movdqa %xmm5, %xmm2
  2248. pandn $res_y+0x10(%rsp), %xmm1
  2249. movdqa %xmm5, %xmm3
  2250. pand $in2_y(%rsp), %xmm2
  2251. pand $in2_y+0x10(%rsp), %xmm3
  2252. por %xmm0, %xmm2
  2253. por %xmm1, %xmm3
  2254. movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
  2255. movdqa %xmm4, %xmm1
  2256. pandn %xmm2, %xmm0
  2257. movdqa %xmm4, %xmm2
  2258. pandn %xmm3, %xmm1
  2259. movdqa %xmm4, %xmm3
  2260. pand $in1_y(%rsp), %xmm2
  2261. pand $in1_y+0x10(%rsp), %xmm3
  2262. por %xmm0, %xmm2
  2263. por %xmm1, %xmm3
  2264. movdqu %xmm2, 0x20($r_ptr)
  2265. movdqu %xmm3, 0x30($r_ptr)
  2266. .Ladd_done$x:
  2267. add \$32*18+8, %rsp
  2268. pop %r15
  2269. pop %r14
  2270. pop %r13
  2271. pop %r12
  2272. pop %rbx
  2273. pop %rbp
  2274. ret
  2275. .size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
  2276. ___
  2277. }
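# For reference, the generated point addition follows the standard Jacobian
# formulas,
#
#	U1 = X1*Z2^2,  U2 = X2*Z1^2,  H = U2 - U1
#	S1 = Y1*Z2^3,  S2 = Y2*Z1^3,  R = S2 - S1
#	X3 = R^2 - H^3 - 2*U1*H^2
#	Y3 = R*(U1*H^2 - X3) - S1*H^3
#	Z3 = H*Z1*Z2
#
# with the H == 0 cases (an input at infinity, or P == +-Q) examined
# separately, and the in1infty/in2infty masks used at the end to pass the
# other input through unchanged when one operand is the point at infinity.
# The pand/pandn idiom behind those masked copies is, in C terms
# (illustrative only):
#
#	/* dst = mask ? src : dst, without branching; mask is 0 or all-ones */
#	static void copy_conditional(uint64_t dst[4], const uint64_t src[4],
#	                             uint64_t mask)
#	{
#		for (int i = 0; i < 4; i++)
#			dst[i] = (src[i] & mask) | (dst[i] & ~mask);
#	}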
  2278. &gen_add("q");
  2279. sub gen_add_affine () {
  2280. my $x = shift;
  2281. my ($src0,$sfx,$bias);
  2282. my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
  2283. $res_x,$res_y,$res_z,
  2284. $in1_x,$in1_y,$in1_z,
  2285. $in2_x,$in2_y)=map(32*$_,(0..14));
  2286. my $Z1sqr = $S2;
  2287. if ($x ne "x") {
  2288. $src0 = "%rax";
  2289. $sfx = "";
  2290. $bias = 0;
  2291. $code.=<<___;
  2292. .globl ecp_nistz256_point_add_affine
  2293. .type ecp_nistz256_point_add_affine,\@function,3
  2294. .align 32
  2295. ecp_nistz256_point_add_affine:
  2296. ___
  2297. $code.=<<___ if ($addx);
  2298. mov \$0x80100, %ecx
  2299. and OPENSSL_ia32cap_P+8(%rip), %ecx
  2300. cmp \$0x80100, %ecx
  2301. je .Lpoint_add_affinex
  2302. ___
  2303. } else {
  2304. $src0 = "%rdx";
  2305. $sfx = "x";
  2306. $bias = 128;
  2307. $code.=<<___;
  2308. .type ecp_nistz256_point_add_affinex,\@function,3
  2309. .align 32
  2310. ecp_nistz256_point_add_affinex:
  2311. .Lpoint_add_affinex:
  2312. ___
  2313. }
  2314. $code.=<<___;
  2315. push %rbp
  2316. push %rbx
  2317. push %r12
  2318. push %r13
  2319. push %r14
  2320. push %r15
  2321. sub \$32*15+8, %rsp
  2322. movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
  2323. mov $b_org, $b_ptr # reassign
  2324. movdqu 0x10($a_ptr), %xmm1
  2325. movdqu 0x20($a_ptr), %xmm2
  2326. movdqu 0x30($a_ptr), %xmm3
  2327. movdqu 0x40($a_ptr), %xmm4
  2328. movdqu 0x50($a_ptr), %xmm5
  2329. mov 0x40+8*0($a_ptr), $src0 # load original in1_z
  2330. mov 0x40+8*1($a_ptr), $acc6
  2331. mov 0x40+8*2($a_ptr), $acc7
  2332. mov 0x40+8*3($a_ptr), $acc0
  2333. movdqa %xmm0, $in1_x(%rsp)
  2334. movdqa %xmm1, $in1_x+0x10(%rsp)
  2335. por %xmm0, %xmm1
  2336. movdqa %xmm2, $in1_y(%rsp)
  2337. movdqa %xmm3, $in1_y+0x10(%rsp)
  2338. por %xmm2, %xmm3
  2339. movdqa %xmm4, $in1_z(%rsp)
  2340. movdqa %xmm5, $in1_z+0x10(%rsp)
  2341. por %xmm1, %xmm3
  2342. movdqu 0x00($b_ptr), %xmm0 # copy *(P256_POINT_AFFINE *)$b_ptr
  2343. pshufd \$0xb1, %xmm3, %xmm5
  2344. movdqu 0x10($b_ptr), %xmm1
  2345. movdqu 0x20($b_ptr), %xmm2
  2346. por %xmm3, %xmm5
  2347. movdqu 0x30($b_ptr), %xmm3
  2348. movdqa %xmm0, $in2_x(%rsp)
  2349. pshufd \$0x1e, %xmm5, %xmm4
  2350. movdqa %xmm1, $in2_x+0x10(%rsp)
  2351. por %xmm0, %xmm1
  2352. movq $r_ptr, %xmm0 # save $r_ptr
  2353. movdqa %xmm2, $in2_y(%rsp)
  2354. movdqa %xmm3, $in2_y+0x10(%rsp)
  2355. por %xmm2, %xmm3
  2356. por %xmm4, %xmm5
  2357. pxor %xmm4, %xmm4
  2358. por %xmm1, %xmm3
  2359. lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
  2360. lea $Z1sqr(%rsp), $r_ptr # Z1^2
  2361. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
  2362. pcmpeqd %xmm4, %xmm5
  2363. pshufd \$0xb1, %xmm3, %xmm4
  2364. mov 0x00($b_ptr), $src0 # $b_ptr is still valid
  2365. #lea 0x00($b_ptr), $b_ptr
  2366. mov $acc4, $acc1 # harmonize sqr output and mul input
  2367. por %xmm3, %xmm4
  2368. pshufd \$0, %xmm5, %xmm5 # in1infty
  2369. pshufd \$0x1e, %xmm4, %xmm3
  2370. mov $acc5, $acc2
  2371. por %xmm3, %xmm4
  2372. pxor %xmm3, %xmm3
  2373. mov $acc6, $acc3
  2374. pcmpeqd %xmm3, %xmm4
  2375. pshufd \$0, %xmm4, %xmm4 # in2infty
  2376. lea $Z1sqr-$bias(%rsp), $a_ptr
  2377. mov $acc7, $acc4
  2378. lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
  2379. call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, Z1sqr, in2_x);
  2380. lea $in1_x(%rsp), $b_ptr
  2381. lea $H(%rsp), $r_ptr # H = U2 - U1
  2382. call __ecp_nistz256_sub_from$x # p256_sub(H, U2, in1_x);
  2383. `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
  2384. lea $S2(%rsp), $r_ptr # S2 = Z1^3
  2385. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
  2386. `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
  2387. lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
  2388. call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
  2389. `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
  2390. lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
  2391. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
  2392. lea $in1_y(%rsp), $b_ptr
  2393. lea $R(%rsp), $r_ptr # R = S2 - S1
  2394. call __ecp_nistz256_sub_from$x # p256_sub(R, S2, in1_y);
  2395. `&load_for_sqr("$H(%rsp)", "$src0")`
  2396. lea $Hsqr(%rsp), $r_ptr # H^2
  2397. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
  2398. `&load_for_sqr("$R(%rsp)", "$src0")`
  2399. lea $Rsqr(%rsp), $r_ptr # R^2
  2400. call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
  2401. `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
  2402. lea $Hcub(%rsp), $r_ptr # H^3
  2403. call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
  2404. `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
  2405. lea $U2(%rsp), $r_ptr # U1*H^2
  2406. call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in1_x, Hsqr);
  2407. ___
  2408. {
  2409. #######################################################################
  2410. # operate in 4-5-0-1 "name space" that matches multiplication output
  2411. #
  2412. my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
  2413. my ($poly1, $poly3)=($acc6,$acc7);
  2414. $code.=<<___;
  2415. #lea $U2(%rsp), $a_ptr
  2416. #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
  2417. #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
  2418. add $acc0, $acc0 # a0:a3+a0:a3
  2419. lea $Rsqr(%rsp), $a_ptr
  2420. adc $acc1, $acc1
  2421. mov $acc0, $t0
  2422. adc $acc2, $acc2
  2423. adc $acc3, $acc3
  2424. mov $acc1, $t1
  2425. sbb $t4, $t4
  2426. sub \$-1, $acc0
  2427. mov $acc2, $t2
  2428. sbb $poly1, $acc1
  2429. sbb \$0, $acc2
  2430. mov $acc3, $t3
  2431. sbb $poly3, $acc3
  2432. test $t4, $t4
  2433. cmovz $t0, $acc0
  2434. mov 8*0($a_ptr), $t0
  2435. cmovz $t1, $acc1
  2436. mov 8*1($a_ptr), $t1
  2437. cmovz $t2, $acc2
  2438. mov 8*2($a_ptr), $t2
  2439. cmovz $t3, $acc3
  2440. mov 8*3($a_ptr), $t3
  2441. call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
  2442. lea $Hcub(%rsp), $b_ptr
  2443. lea $res_x(%rsp), $r_ptr
  2444. call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
  2445. mov $U2+8*0(%rsp), $t0
  2446. mov $U2+8*1(%rsp), $t1
  2447. mov $U2+8*2(%rsp), $t2
  2448. mov $U2+8*3(%rsp), $t3
  2449. lea $H(%rsp), $r_ptr
  2450. call __ecp_nistz256_sub$x # p256_sub(H, U2, res_x);
  2451. mov $acc0, 8*0($r_ptr) # save the result, as
  2452. mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
  2453. mov $acc2, 8*2($r_ptr)
  2454. mov $acc3, 8*3($r_ptr)
  2455. ___
  2456. }
  2457. $code.=<<___;
  2458. `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
  2459. lea $S2(%rsp), $r_ptr
  2460. call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Hcub, in1_y);
  2461. `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
  2462. lea $H(%rsp), $r_ptr
  2463. call __ecp_nistz256_mul_mont$x # p256_mul_mont(H, H, R);
  2464. lea $S2(%rsp), $b_ptr
  2465. lea $res_y(%rsp), $r_ptr
  2466. call __ecp_nistz256_sub_from$x # p256_sub(res_y, H, S2);
  2467. movq %xmm0, $r_ptr # restore $r_ptr
  2468. movdqa %xmm5, %xmm0 # copy_conditional(res_z, ONE, in1infty);
  2469. movdqa %xmm5, %xmm1
  2470. pandn $res_z(%rsp), %xmm0
  2471. movdqa %xmm5, %xmm2
  2472. pandn $res_z+0x10(%rsp), %xmm1
  2473. movdqa %xmm5, %xmm3
  2474. pand .LONE_mont(%rip), %xmm2
  2475. pand .LONE_mont+0x10(%rip), %xmm3
  2476. por %xmm0, %xmm2
  2477. por %xmm1, %xmm3
  2478. movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
  2479. movdqa %xmm4, %xmm1
  2480. pandn %xmm2, %xmm0
  2481. movdqa %xmm4, %xmm2
  2482. pandn %xmm3, %xmm1
  2483. movdqa %xmm4, %xmm3
  2484. pand $in1_z(%rsp), %xmm2
  2485. pand $in1_z+0x10(%rsp), %xmm3
  2486. por %xmm0, %xmm2
  2487. por %xmm1, %xmm3
  2488. movdqu %xmm2, 0x40($r_ptr)
  2489. movdqu %xmm3, 0x50($r_ptr)
  2490. movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
  2491. movdqa %xmm5, %xmm1
  2492. pandn $res_x(%rsp), %xmm0
  2493. movdqa %xmm5, %xmm2
  2494. pandn $res_x+0x10(%rsp), %xmm1
  2495. movdqa %xmm5, %xmm3
  2496. pand $in2_x(%rsp), %xmm2
  2497. pand $in2_x+0x10(%rsp), %xmm3
  2498. por %xmm0, %xmm2
  2499. por %xmm1, %xmm3
  2500. movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
  2501. movdqa %xmm4, %xmm1
  2502. pandn %xmm2, %xmm0
  2503. movdqa %xmm4, %xmm2
  2504. pandn %xmm3, %xmm1
  2505. movdqa %xmm4, %xmm3
  2506. pand $in1_x(%rsp), %xmm2
  2507. pand $in1_x+0x10(%rsp), %xmm3
  2508. por %xmm0, %xmm2
  2509. por %xmm1, %xmm3
  2510. movdqu %xmm2, 0x00($r_ptr)
  2511. movdqu %xmm3, 0x10($r_ptr)
  2512. movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
  2513. movdqa %xmm5, %xmm1
  2514. pandn $res_y(%rsp), %xmm0
  2515. movdqa %xmm5, %xmm2
  2516. pandn $res_y+0x10(%rsp), %xmm1
  2517. movdqa %xmm5, %xmm3
  2518. pand $in2_y(%rsp), %xmm2
  2519. pand $in2_y+0x10(%rsp), %xmm3
  2520. por %xmm0, %xmm2
  2521. por %xmm1, %xmm3
  2522. movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
  2523. movdqa %xmm4, %xmm1
  2524. pandn %xmm2, %xmm0
  2525. movdqa %xmm4, %xmm2
  2526. pandn %xmm3, %xmm1
  2527. movdqa %xmm4, %xmm3
  2528. pand $in1_y(%rsp), %xmm2
  2529. pand $in1_y+0x10(%rsp), %xmm3
  2530. por %xmm0, %xmm2
  2531. por %xmm1, %xmm3
  2532. movdqu %xmm2, 0x20($r_ptr)
  2533. movdqu %xmm3, 0x30($r_ptr)
  2534. add \$32*15+8, %rsp
  2535. pop %r15
  2536. pop %r14
  2537. pop %r13
  2538. pop %r12
  2539. pop %rbx
  2540. pop %rbp
  2541. ret
  2542. .size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
  2543. ___
  2544. }
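# ecp_nistz256_point_add_affine specializes the addition above to an affine
# second input (Z2 = 1, so U1 = X1 and S1 = Y1):
#
#	U2 = X2*Z1^2,  H = U2 - X1
#	S2 = Y2*Z1^3,  R = S2 - Y1
#	X3 = R^2 - H^3 - 2*X1*H^2
#	Y3 = R*(X1*H^2 - X3) - Y1*H^3
#	Z3 = H*Z1
#
# When the first input is the point at infinity, the masked copies at the end
# substitute the affine input instead (with Z3 taken from .LONE_mont).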
  2545. &gen_add_affine("q");
  2546. ########################################################################
  2547. # AD*X magic
  2548. #
  2549. if ($addx) { {
  2550. ########################################################################
  2551. # operate in 4-5-0-1 "name space" that matches multiplication output
  2552. #
  2553. my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
  2554. $code.=<<___;
  2555. .type __ecp_nistz256_add_tox,\@abi-omnipotent
  2556. .align 32
  2557. __ecp_nistz256_add_tox:
  2558. xor $t4, $t4
  2559. adc 8*0($b_ptr), $a0
  2560. adc 8*1($b_ptr), $a1
  2561. mov $a0, $t0
  2562. adc 8*2($b_ptr), $a2
  2563. adc 8*3($b_ptr), $a3
  2564. mov $a1, $t1
  2565. adc \$0, $t4
  2566. xor $t3, $t3
  2567. sbb \$-1, $a0
  2568. mov $a2, $t2
  2569. sbb $poly1, $a1
  2570. sbb \$0, $a2
  2571. mov $a3, $t3
  2572. sbb $poly3, $a3
  2573. bt \$0, $t4
  2574. cmovnc $t0, $a0
  2575. cmovnc $t1, $a1
  2576. mov $a0, 8*0($r_ptr)
  2577. cmovnc $t2, $a2
  2578. mov $a1, 8*1($r_ptr)
  2579. cmovnc $t3, $a3
  2580. mov $a2, 8*2($r_ptr)
  2581. mov $a3, 8*3($r_ptr)
  2582. ret
  2583. .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
  2584. .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
  2585. .align 32
  2586. __ecp_nistz256_sub_fromx:
  2587. xor $t4, $t4
  2588. sbb 8*0($b_ptr), $a0
  2589. sbb 8*1($b_ptr), $a1
  2590. mov $a0, $t0
  2591. sbb 8*2($b_ptr), $a2
  2592. sbb 8*3($b_ptr), $a3
  2593. mov $a1, $t1
  2594. sbb \$0, $t4
  2595. xor $t3, $t3
  2596. adc \$-1, $a0
  2597. mov $a2, $t2
  2598. adc $poly1, $a1
  2599. adc \$0, $a2
  2600. mov $a3, $t3
  2601. adc $poly3, $a3
  2602. bt \$0, $t4
  2603. cmovnc $t0, $a0
  2604. cmovnc $t1, $a1
  2605. mov $a0, 8*0($r_ptr)
  2606. cmovnc $t2, $a2
  2607. mov $a1, 8*1($r_ptr)
  2608. cmovnc $t3, $a3
  2609. mov $a2, 8*2($r_ptr)
  2610. mov $a3, 8*3($r_ptr)
  2611. ret
  2612. .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
  2613. .type __ecp_nistz256_subx,\@abi-omnipotent
  2614. .align 32
  2615. __ecp_nistz256_subx:
  2616. xor $t4, $t4
  2617. sbb $a0, $t0
  2618. sbb $a1, $t1
  2619. mov $t0, $a0
  2620. sbb $a2, $t2
  2621. sbb $a3, $t3
  2622. mov $t1, $a1
  2623. sbb \$0, $t4
2624. xor $a3, $a3
  2625. adc \$-1, $t0
  2626. mov $t2, $a2
  2627. adc $poly1, $t1
  2628. adc \$0, $t2
  2629. mov $t3, $a3
  2630. adc $poly3, $t3
  2631. bt \$0, $t4
  2632. cmovc $t0, $a0
  2633. cmovc $t1, $a1
  2634. cmovc $t2, $a2
  2635. cmovc $t3, $a3
  2636. ret
  2637. .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
  2638. .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
  2639. .align 32
  2640. __ecp_nistz256_mul_by_2x:
  2641. xor $t4, $t4
  2642. adc $a0, $a0 # a0:a3+a0:a3
  2643. adc $a1, $a1
  2644. mov $a0, $t0
  2645. adc $a2, $a2
  2646. adc $a3, $a3
  2647. mov $a1, $t1
  2648. adc \$0, $t4
  2649. xor $t3, $t3
  2650. sbb \$-1, $a0
  2651. mov $a2, $t2
  2652. sbb $poly1, $a1
  2653. sbb \$0, $a2
  2654. mov $a3, $t3
  2655. sbb $poly3, $a3
  2656. bt \$0, $t4
  2657. cmovnc $t0, $a0
  2658. cmovnc $t1, $a1
  2659. mov $a0, 8*0($r_ptr)
  2660. cmovnc $t2, $a2
  2661. mov $a1, 8*1($r_ptr)
  2662. cmovnc $t3, $a3
  2663. mov $a2, 8*2($r_ptr)
  2664. mov $a3, 8*3($r_ptr)
  2665. ret
  2666. .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
  2667. ___
  2668. }
  2669. &gen_double("x");
  2670. &gen_add("x");
  2671. &gen_add_affine("x");
  2672. }
  2673. }}}
  2674. ########################################################################
2675. # Convert ecp_nistz256_table.c to layout expected by ecp_nistz256_gather_w7
  2676. #
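# The element count checked below corresponds to 37 seven-bit windows, each
# holding 64 precomputed affine points of 2*256 bits, i.e. 16 32-bit words
# per point.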
  2677. open TABLE,"<ecp_nistz256_table.c" or
  2678. open TABLE,"<${dir}../ecp_nistz256_table.c" or
  2679. die "failed to open ecp_nistz256_table.c:",$!;
  2680. use integer;
  2681. foreach(<TABLE>) {
  2682. s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
  2683. }
  2684. close TABLE;
  2685. die "insane number of elements" if ($#arr != 64*16*37-1);
  2686. print <<___;
  2687. .text
  2688. .globl ecp_nistz256_precomputed
  2689. .type ecp_nistz256_precomputed,\@object
  2690. .align 4096
  2691. ecp_nistz256_precomputed:
  2692. ___
  2693. while (@line=splice(@arr,0,16)) {
  2694. print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
  2695. }
  2696. print <<___;
  2697. .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
  2698. ___
  2699. $code =~ s/\`([^\`]*)\`/eval $1/gem;
  2700. print $code;
  2701. close STDOUT;