#! /usr/bin/env perl
# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ECP_NISTZ256 module for SPARCv9.
#
# February 2015.
#
# Original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#
#			with/without -DECP_NISTZ256_ASM
# UltraSPARC III	+12-18%
# SPARC T4		+99-550% (+66-150% on 32-bit Solaris)
#
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark. Lower coefficients are for ECDSA sign, server-side
# operation. Keep in mind that +200% means 3x improvement.
$output = pop;
open STDOUT,">$output";

$code.=<<___;
#include "sparc_arch.h"

#define LOCALS	(STACK_BIAS+STACK_FRAME)
#ifdef	__arch64__
.register	%g2,#scratch
.register	%g3,#scratch
# define STACK64_FRAME	STACK_FRAME
# define LOCALS64	LOCALS
#else
# define STACK64_FRAME	(2047+192)
# define LOCALS64	STACK64_FRAME
#endif

.section	".text",#alloc,#execinstr
___
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
#
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;

use integer;

foreach(<TABLE>) {
	s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
}
close TABLE;
# See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
# 64*16*37-1 is used because $#arr returns the last valid index of @arr,
# not the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);
$code.=<<___;
.globl	ecp_nistz256_precomputed
.align	4096
ecp_nistz256_precomputed:
___
########################################################################
# this conversion smashes P256_POINT_AFFINE by individual bytes with
# a 64-byte interval, similar to
#	1111222233334444
#	1234123412341234
for(1..37) {
	@tbl = splice(@arr,0,64*16);
	for($i=0;$i<64;$i++) {
		undef @line;
		for($j=0;$j<64;$j++) {
			push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
		}
		$code.=".byte\t";
		$code.=join(',',map { sprintf "0x%02x",$_} @line);
		$code.="\n";
	}
}

{{{
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my @acc=map("%l$_",(0..7));
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");

$code.=<<___;
.type	ecp_nistz256_precomputed,#object
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
.align	64
.LRR:	! 2^512 mod P precomputed for NIST P256 polynomial
.long	0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long	0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.Lone:
.long	1,0,0,0,0,0,0,0
.asciz	"ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"

! void	ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_to_mont
.align	64
ecp_nistz256_to_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
1:	call	.+8
	add	%o7,.LRR-1b,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_to_mont,#function
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont

! void	ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_from_mont
.align	32
ecp_nistz256_from_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
1:	call	.+8
	add	%o7,.Lone-1b,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_from_mont,#function
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont

! void	ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					      const BN_ULONG %i2[8]);
.globl	ecp_nistz256_mul_mont
.align	32
ecp_nistz256_mul_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_mul_mont,#function
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont

! void	ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_sqr_mont
.align	32
ecp_nistz256_sqr_mont:
	save	%sp,-STACK_FRAME,%sp
	mov	$ap,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_sqr_mont,#function
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
___
########################################################################
# Special thing to keep in mind is that $t0-$t7 hold 64-bit values,
# while all others are meant to keep 32. "Meant to" means that additions
# to @acc[0-7] do "contaminate" upper bits, but they are cleared before
# they can affect outcome (follow 'and' with $mask). Also keep in mind
# that addition with carry is addition with 32-bit carry, even though
# CPU is 64-bit. [Addition with 64-bit carry was introduced in T3, see
# below for VIS3 code paths.]
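#
# A minimal sketch (editorial, not emitted code) of one carry-chained
# step as the hardware executes it; icc.c denotes the 32-bit carry flag:
#
#	addcc	%l1,%o1,%l1	! %l1 += %o1;         sets icc.c from bit 31
#	addccc	%l2,%o2,%l2	! %l2 += %o2 + icc.c; sets icc.c again
#
# After such steps the upper halves of %l1/%l2 may hold junk, which is
# why results are masked with $mask before they can influence anything.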
$code.=<<___;
.align	32
__ecp_nistz256_mul_mont:
	ld	[$bp+0],$bi		! b[0]
	mov	-1,$mask
	ld	[$ap+0],$a0
	srl	$mask,0,$mask		! 0xffffffff
	ld	[$ap+4],$t1
	ld	[$ap+8],$t2
	ld	[$ap+12],$t3
	ld	[$ap+16],$t4
	ld	[$ap+20],$t5
	ld	[$ap+24],$t6
	ld	[$ap+28],$t7
	mulx	$a0,$bi,$t0		! a[0-7]*b[0], 64-bit results
	mulx	$t1,$bi,$t1
	mulx	$t2,$bi,$t2
	mulx	$t3,$bi,$t3
	mulx	$t4,$bi,$t4
	mulx	$t5,$bi,$t5
	mulx	$t6,$bi,$t6
	mulx	$t7,$bi,$t7
	srlx	$t0,32,@acc[1]		! extract high parts
	srlx	$t1,32,@acc[2]
	srlx	$t2,32,@acc[3]
	srlx	$t3,32,@acc[4]
	srlx	$t4,32,@acc[5]
	srlx	$t5,32,@acc[6]
	srlx	$t6,32,@acc[7]
	srlx	$t7,32,@acc[0]		! "@acc[8]"
	mov	0,$carry
___
for($i=1;$i<8;$i++) {
$code.=<<___;
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	ld	[$bp+4*$i],$bi		! b[$i]
	ld	[$ap+4],$t1		! re-load a[1-7]
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	ld	[$ap+8],$t2
	ld	[$ap+12],$t3
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	ld	[$ap+16],$t4
	ld	[$ap+20],$t5
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	ld	[$ap+24],$t6
	ld	[$ap+28],$t7
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"
	addc	%g0,%g0,$carry
___
	# Reduction iteration is normally performed by accumulating
	# result of multiplication of modulus by "magic" digit [and
	# omitting least significant word, which is guaranteed to
	# be 0], but thanks to special form of modulus and "magic"
	# digit being equal to least significant word, it can be
	# performed with additions and subtractions alone. Indeed:
	#
	#        ffff.0001.0000.0000.0000.ffff.ffff.ffff
	# *                                         abcd
	# + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
	# rewrite above as:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	# + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
	# - abcd.0000.0000.0000.0000.0000.0000.abcd
	#
	# or marking redundant operations:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
	# + abcd.0000.abcd.0000.0000.abcd.----.----.----
	# - abcd.----.----.----.----.----.----.----
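	#
	# A concrete sanity check of that rewrite (a hedged editorial
	# illustration, not part of the generator; "use bigint" is core
	# Perl):
	#
	#	use bigint;
	#	my $p = 2**256 - 2**224 + 2**192 + 2**96 - 1;	# NIST P256
	#	my $x = 0xabcd;					# "magic" digit
	#	die "bad identity" unless
	#	    $p*$x == $x*(2**256 + 2**192 + 2**96) - $x*(2**224 + 1);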
$code.=<<___;
	! multiplication-less reduction
	addcc	@acc[3],$t0,@acc[3]	! r[3]+=r[0]
	addccc	@acc[4],%g0,@acc[4]	! r[4]+=0
	and	@acc[1],$mask,@acc[1]
	and	@acc[2],$mask,@acc[2]
	addccc	@acc[5],%g0,@acc[5]	! r[5]+=0
	addccc	@acc[6],$t0,@acc[6]	! r[6]+=r[0]
	and	@acc[3],$mask,@acc[3]
	and	@acc[4],$mask,@acc[4]
	addccc	@acc[7],%g0,@acc[7]	! r[7]+=0
	addccc	@acc[0],$t0,@acc[0]	! r[8]+=r[0]	"@acc[8]"
	and	@acc[5],$mask,@acc[5]
	and	@acc[6],$mask,@acc[6]
	addc	$carry,%g0,$carry	! top-most carry
	subcc	@acc[7],$t0,@acc[7]	! r[7]-=r[0]
	subccc	@acc[0],%g0,@acc[0]	! r[8]-=0	"@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry
	and	@acc[7],$mask,@acc[7]
	and	@acc[0],$mask,@acc[0]	! "@acc[8]"
___
	push(@acc,shift(@acc));		# rotate registers to "omit" acc[0]
$code.=<<___;
	mulx	$a0,$bi,$t0		! a[0-7]*b[$i], 64-bit results
	mulx	$t1,$bi,$t1
	mulx	$t2,$bi,$t2
	mulx	$t3,$bi,$t3
	mulx	$t4,$bi,$t4
	mulx	$t5,$bi,$t5
	mulx	$t6,$bi,$t6
	mulx	$t7,$bi,$t7
	add	@acc[0],$t0,$t0		! accumulate low parts, can't overflow
	add	@acc[1],$t1,$t1
	srlx	$t0,32,@acc[1]		! extract high parts
	add	@acc[2],$t2,$t2
	srlx	$t1,32,@acc[2]
	add	@acc[3],$t3,$t3
	srlx	$t2,32,@acc[3]
	add	@acc[4],$t4,$t4
	srlx	$t3,32,@acc[4]
	add	@acc[5],$t5,$t5
	srlx	$t4,32,@acc[5]
	add	@acc[6],$t6,$t6
	srlx	$t5,32,@acc[6]
	add	@acc[7],$t7,$t7
	srlx	$t6,32,@acc[7]
	srlx	$t7,32,@acc[0]		! "@acc[8]"
___
}
$code.=<<___;
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"
	addc	%g0,%g0,$carry

	addcc	@acc[3],$t0,@acc[3]	! multiplication-less reduction
	addccc	@acc[4],%g0,@acc[4]
	addccc	@acc[5],%g0,@acc[5]
	addccc	@acc[6],$t0,@acc[6]
	addccc	@acc[7],%g0,@acc[7]
	addccc	@acc[0],$t0,@acc[0]	! "@acc[8]"
	addc	$carry,%g0,$carry

	subcc	@acc[7],$t0,@acc[7]
	subccc	@acc[0],%g0,@acc[0]	! "@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry
___
push(@acc,shift(@acc));			# rotate registers to omit acc[0]
$code.=<<___;
	! Final step is "if result > mod, subtract mod", but we do it
	! "other way around", namely subtract modulus from result
	! and if it borrowed, add modulus back.

	subcc	@acc[0],-1,@acc[0]	! subtract modulus
	subccc	@acc[1],-1,@acc[1]
	subccc	@acc[2],-1,@acc[2]
	subccc	@acc[3],0,@acc[3]
	subccc	@acc[4],0,@acc[4]
	subccc	@acc[5],0,@acc[5]
	subccc	@acc[6],1,@acc[6]
	subccc	@acc[7],-1,@acc[7]
	subc	$carry,0,$carry		! broadcast borrow bit

	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using value of broadcasted borrow and the borrow bit itself.
	! To minimize dependency chain we first broadcast and then
	! extract the bit by negating (follow $bi).
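	!
	! Illustrative values (editorial sketch, not extra code): if the
	! subtraction above borrowed, $carry ends up as -1 (0xffffffff)
	! and $bi=neg($carry)=1, so the addend vector below,
	!	($carry,$carry,$carry,0,0,0,$bi,$carry),
	! is exactly the modulus in little-endian words,
	!	(ffffffff,ffffffff,ffffffff,0,0,0,1,ffffffff);
	! with no borrow, $carry=0 and $bi=0, so zero is added instead.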
	addcc	@acc[0],$carry,@acc[0]	! add modulus or zero
	addccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	addccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	addccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	addccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	addccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	addccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	addc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_mul_mont,#function
.size	__ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont

! void	ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_add
.align	32
ecp_nistz256_add:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_add
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_add,#function
.size	ecp_nistz256_add,.-ecp_nistz256_add

.align	32
__ecp_nistz256_add:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	addcc	@acc[0],$t0,@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	addccc	@acc[1],$t1,@acc[1]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	addc	%g0,%g0,$carry

.Lreduce_by_sub:

	! if a+b >= modulus, subtract modulus.
	!
	! But since comparison implies subtraction, we subtract
	! modulus and then add it back if subtraction borrowed.

	subcc	@acc[0],-1,@acc[0]
	subccc	@acc[1],-1,@acc[1]
	subccc	@acc[2],-1,@acc[2]
	subccc	@acc[3], 0,@acc[3]
	subccc	@acc[4], 0,@acc[4]
	subccc	@acc[5], 0,@acc[5]
	subccc	@acc[6], 1,@acc[6]
	subccc	@acc[7],-1,@acc[7]
	subc	$carry,0,$carry

	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using value of borrow and its negative.

	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
	addccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	addccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	addccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	addccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	addccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	addccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	addc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_add,#function
.size	__ecp_nistz256_add,.-__ecp_nistz256_add

! void	ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_2
.align	32
ecp_nistz256_mul_by_2:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_mul_by_2
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_mul_by_2,#function
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2

.align	32
__ecp_nistz256_mul_by_2:
	addcc	@acc[0],@acc[0],@acc[0]	! a+a=2*a
	addccc	@acc[1],@acc[1],@acc[1]
	addccc	@acc[2],@acc[2],@acc[2]
	addccc	@acc[3],@acc[3],@acc[3]
	addccc	@acc[4],@acc[4],@acc[4]
	addccc	@acc[5],@acc[5],@acc[5]
	addccc	@acc[6],@acc[6],@acc[6]
	addccc	@acc[7],@acc[7],@acc[7]
	b	.Lreduce_by_sub
	addc	%g0,%g0,$carry
.type	__ecp_nistz256_mul_by_2,#function
.size	__ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2

! void	ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_3
.align	32
ecp_nistz256_mul_by_3:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_mul_by_3
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_mul_by_3,#function
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3

.align	32
__ecp_nistz256_mul_by_3:
	addcc	@acc[0],@acc[0],$t0	! a+a=2*a
	addccc	@acc[1],@acc[1],$t1
	addccc	@acc[2],@acc[2],$t2
	addccc	@acc[3],@acc[3],$t3
	addccc	@acc[4],@acc[4],$t4
	addccc	@acc[5],@acc[5],$t5
	addccc	@acc[6],@acc[6],$t6
	addccc	@acc[7],@acc[7],$t7
	addc	%g0,%g0,$carry

	subcc	$t0,-1,$t0		! .Lreduce_by_sub but without stores
	subccc	$t1,-1,$t1
	subccc	$t2,-1,$t2
	subccc	$t3, 0,$t3
	subccc	$t4, 0,$t4
	subccc	$t5, 0,$t5
	subccc	$t6, 1,$t6
	subccc	$t7,-1,$t7
	subc	$carry,0,$carry

	addcc	$t0,$carry,$t0		! add synthesized modulus
	addccc	$t1,$carry,$t1
	neg	$carry,$bi
	addccc	$t2,$carry,$t2
	addccc	$t3,0,$t3
	addccc	$t4,0,$t4
	addccc	$t5,0,$t5
	addccc	$t6,$bi,$t6
	addc	$t7,$carry,$t7

	addcc	$t0,@acc[0],@acc[0]	! 2*a+a=3*a
	addccc	$t1,@acc[1],@acc[1]
	addccc	$t2,@acc[2],@acc[2]
	addccc	$t3,@acc[3],@acc[3]
	addccc	$t4,@acc[4],@acc[4]
	addccc	$t5,@acc[5],@acc[5]
	addccc	$t6,@acc[6],@acc[6]
	addccc	$t7,@acc[7],@acc[7]
	b	.Lreduce_by_sub
	addc	%g0,%g0,$carry
.type	__ecp_nistz256_mul_by_3,#function
.size	__ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3

! void	ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_sub
.align	32
ecp_nistz256_sub:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_sub_from
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_sub,#function
.size	ecp_nistz256_sub,.-ecp_nistz256_sub

! void	ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_neg
.align	32
ecp_nistz256_neg:
	save	%sp,-STACK_FRAME,%sp
	mov	$ap,$bp
	mov	0,@acc[0]
	mov	0,@acc[1]
	mov	0,@acc[2]
	mov	0,@acc[3]
	mov	0,@acc[4]
	mov	0,@acc[5]
	mov	0,@acc[6]
	call	__ecp_nistz256_sub_from
	mov	0,@acc[7]
	ret
	restore
.type	ecp_nistz256_neg,#function
.size	ecp_nistz256_neg,.-ecp_nistz256_neg

.align	32
__ecp_nistz256_sub_from:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	subcc	@acc[0],$t0,@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	subccc	@acc[1],$t1,@acc[1]
	subccc	@acc[2],$t2,@acc[2]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	subccc	@acc[3],$t3,@acc[3]
	subccc	@acc[4],$t4,@acc[4]
	subccc	@acc[5],$t5,@acc[5]
	subccc	@acc[6],$t6,@acc[6]
	subccc	@acc[7],$t7,@acc[7]
	subc	%g0,%g0,$carry		! broadcast borrow bit

.Lreduce_by_add:

	! if a-b borrows, add modulus.
	!
	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using value of broadcasted borrow and the borrow bit itself.
	! To minimize dependency chain we first broadcast and then
	! extract the bit by negating (follow $bi).

	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
	addccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	addccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	addccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	addccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	addccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	addccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	addc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_sub_from,#function
.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from

.align	32
__ecp_nistz256_sub_morf:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	subcc	$t0,@acc[0],@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	subccc	$t1,@acc[1],@acc[1]
	subccc	$t2,@acc[2],@acc[2]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	subccc	$t3,@acc[3],@acc[3]
	subccc	$t4,@acc[4],@acc[4]
	subccc	$t5,@acc[5],@acc[5]
	subccc	$t6,@acc[6],@acc[6]
	subccc	$t7,@acc[7],@acc[7]
	b	.Lreduce_by_add
	subc	%g0,%g0,$carry		! broadcast borrow bit
.type	__ecp_nistz256_sub_morf,#function
.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf

! void	ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_div_by_2
.align	32
ecp_nistz256_div_by_2:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_div_by_2
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_div_by_2,#function
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2

.align	32
__ecp_nistz256_div_by_2:
	! ret = (a is odd ? a+mod : a) >> 1
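	!
	! (Editorial note: this works because the modulus is odd, so when
	! a is odd, a+mod is even and can be halved exactly, and
	! (a+mod)/2 == a/2 mod p. The possible 257th bit of a+mod is kept
	! in $carry and shifted back in as the top bit below.)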
	and	@acc[0],1,$bi
	neg	$bi,$carry
	addcc	@acc[0],$carry,@acc[0]
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addccc	@acc[7],$carry,@acc[7]
	addc	%g0,%g0,$carry

	! ret >>= 1

	srl	@acc[0],1,@acc[0]
	sll	@acc[1],31,$t0
	srl	@acc[1],1,@acc[1]
	or	@acc[0],$t0,@acc[0]
	sll	@acc[2],31,$t1
	srl	@acc[2],1,@acc[2]
	or	@acc[1],$t1,@acc[1]
	sll	@acc[3],31,$t2
	st	@acc[0],[$rp]
	srl	@acc[3],1,@acc[3]
	or	@acc[2],$t2,@acc[2]
	sll	@acc[4],31,$t3
	st	@acc[1],[$rp+4]
	srl	@acc[4],1,@acc[4]
	or	@acc[3],$t3,@acc[3]
	sll	@acc[5],31,$t4
	st	@acc[2],[$rp+8]
	srl	@acc[5],1,@acc[5]
	or	@acc[4],$t4,@acc[4]
	sll	@acc[6],31,$t5
	st	@acc[3],[$rp+12]
	srl	@acc[6],1,@acc[6]
	or	@acc[5],$t5,@acc[5]
	sll	@acc[7],31,$t6
	st	@acc[4],[$rp+16]
	srl	@acc[7],1,@acc[7]
	or	@acc[6],$t6,@acc[6]
	sll	$carry,31,$t7
	st	@acc[5],[$rp+20]
	or	@acc[7],$t7,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_div_by_2,#function
.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
___

########################################################################
# The following subroutines are "literal" implementations of those
# found in ecp_nistz256.c.
#
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
#
{
my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
# above map() describes stack layout with 4 temporary
# 256-bit vectors on top.
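#
# An illustrative picture of the resulting scratch area (editorial
# sketch; offsets follow from the map(32*$_,...) above, relative to
# %sp+LOCALS):
#
#	S	at LOCALS+0
#	M	at LOCALS+32
#	Zsqr	at LOCALS+64
#	tmp0	at LOCALS+96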
$code.=<<___;
#ifdef __PIC__
SPARC_PIC_THUNK(%g1)
#endif

.globl	ecp_nistz256_point_double
.align	32
ecp_nistz256_point_double:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_double_vis3
	nop

	save	%sp,-STACK_FRAME-32*4,%sp

	mov	$rp,$rp_real
	mov	$ap,$ap_real

.Lpoint_double_shortcut:
	ld	[$ap+32],@acc[0]
	ld	[$ap+32+4],@acc[1]
	ld	[$ap+32+8],@acc[2]
	ld	[$ap+32+12],@acc[3]
	ld	[$ap+32+16],@acc[4]
	ld	[$ap+32+20],@acc[5]
	ld	[$ap+32+24],@acc[6]
	ld	[$ap+32+28],@acc[7]
	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(S, in_y);
	add	%sp,LOCALS+$S,$rp

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Zsqr, in_z);
	add	%sp,LOCALS+$Zsqr,$rp

	add	$ap_real,0,$bp
	call	__ecp_nistz256_add	! p256_add(M, Zsqr, in_x);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(S, S);
	add	%sp,LOCALS+$S,$rp

	ld	[$ap_real],@acc[0]
	add	%sp,LOCALS+$Zsqr,$bp
	ld	[$ap_real+4],@acc[1]
	ld	[$ap_real+8],@acc[2]
	ld	[$ap_real+12],@acc[3]
	ld	[$ap_real+16],@acc[4]
	ld	[$ap_real+20],@acc[5]
	ld	[$ap_real+24],@acc[6]
	ld	[$ap_real+28],@acc[7]
	call	__ecp_nistz256_sub_from	! p256_sub(Zsqr, in_x, Zsqr);
	add	%sp,LOCALS+$Zsqr,$rp

	add	$ap_real,32,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(tmp0, in_z, in_y);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(res_z, tmp0);
	add	$rp_real,64,$rp

	add	%sp,LOCALS+$Zsqr,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(M, M, Zsqr);
	add	%sp,LOCALS+$M,$rp

	call	__ecp_nistz256_mul_by_3	! p256_mul_by_3(M, M);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_div_by_2	! p256_div_by_2(res_y, tmp0);
	add	$rp_real,32,$rp

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, in_x);
	add	%sp,LOCALS+$S,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(res_x, M);
	add	$rp_real,0,$rp

	add	%sp,LOCALS+$tmp0,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, tmp0);
	add	$rp_real,0,$rp

	add	%sp,LOCALS+$S,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(S, S, res_x);
	add	%sp,LOCALS+$S,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, M);
	add	%sp,LOCALS+$S,$rp

	add	$rp_real,32,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, S, res_y);
	add	$rp_real,32,$rp

	ret
	restore
.type	ecp_nistz256_point_double,#function
.size	ecp_nistz256_point_double,.-ecp_nistz256_point_double
___
}

########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);
{
my ($res_x,$res_y,$res_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
# above map() describes stack layout with 12 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, result of check for zero and return pointer.

my $bp_real=$rp_real;
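#
# An illustrative frame map (editorial sketch, derived from the code
# below): the twelve vectors live at 32-byte steps from %sp+LOCALS+0
# up to %sp+LOCALS+352, while
#	[%fp+STACK_BIAS-8]	holds the off-loaded return pointer $rp,
#	[%fp+STACK_BIAS-12]	holds !in2infty,
#	[%fp+STACK_BIAS-16]	holds !in1infty,
#	[%fp+STACK_BIAS-20]	holds the is_equal(S1,S2) check word.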
$code.=<<___;
.globl	ecp_nistz256_point_add
.align	32
ecp_nistz256_point_add:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_vis3
	nop

	save	%sp,-STACK_FRAME-32*12-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp
	mov	$ap,$ap_real
	mov	$bp,$bp_real

	ld	[$bp+64],$t0		! in2_z
	ld	[$bp+64+4],$t1
	ld	[$bp+64+8],$t2
	ld	[$bp+64+12],$t3
	ld	[$bp+64+16],$t4
	ld	[$bp+64+20],$t5
	ld	[$bp+64+24],$t6
	ld	[$bp+64+28],$t7
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0		! !in2infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-12]

	ld	[$ap+64],$t0		! in1_z
	ld	[$ap+64+4],$t1
	ld	[$ap+64+8],$t2
	ld	[$ap+64+12],$t3
	ld	[$ap+64+16],$t4
	ld	[$ap+64+20],$t5
	ld	[$ap+64+24],$t6
	ld	[$ap+64+28],$t7
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0		! !in1infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-16]

	add	$bp_real,64,$bp
	add	$bp_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z2sqr, in2_z);
	add	%sp,LOCALS+$Z2sqr,$rp

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	$bp_real,64,$bp
	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, Z2sqr, in2_z);
	add	%sp,LOCALS+$S1,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,32,$bp
	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, S1, in1_y);
	add	%sp,LOCALS+$S1,$rp

	add	$bp_real,32,$bp
	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$S1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, S1);
	add	%sp,LOCALS+$R,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	st	@acc[0],[%fp+STACK_BIAS-20]

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U1, in1_x, Z2sqr);
	add	%sp,LOCALS+$U1,$rp

	add	$bp_real,0,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in2_x, Z1sqr);
	add	%sp,LOCALS+$U2,$rp

	add	%sp,LOCALS+$U1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, U1);
	add	%sp,LOCALS+$H,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	orcc	@acc[4],@acc[0],@acc[0]

	bne,pt	%icc,.Ladd_proceed	! is_equal(U1,U2)?
	nop

	ld	[%fp+STACK_BIAS-12],$t0
	ld	[%fp+STACK_BIAS-16],$t1
	ld	[%fp+STACK_BIAS-20],$t2
	andcc	$t0,$t1,%g0
	be,pt	%icc,.Ladd_proceed	! (in1infty || in2infty)?
	nop
	andcc	$t2,$t2,%g0
	be,pt	%icc,.Ladd_double	! is_equal(S1,S2)?
	nop

	ldx	[%fp+STACK_BIAS-8],$rp
	st	%g0,[$rp]
	st	%g0,[$rp+4]
	st	%g0,[$rp+8]
	st	%g0,[$rp+12]
	st	%g0,[$rp+16]
	st	%g0,[$rp+20]
	st	%g0,[$rp+24]
	st	%g0,[$rp+28]
	st	%g0,[$rp+32]
	st	%g0,[$rp+32+4]
	st	%g0,[$rp+32+8]
	st	%g0,[$rp+32+12]
	st	%g0,[$rp+32+16]
	st	%g0,[$rp+32+20]
	st	%g0,[$rp+32+24]
	st	%g0,[$rp+32+28]
	st	%g0,[$rp+64]
	st	%g0,[$rp+64+4]
	st	%g0,[$rp+64+8]
	st	%g0,[$rp+64+12]
	st	%g0,[$rp+64+16]
	st	%g0,[$rp+64+20]
	st	%g0,[$rp+64+24]
	st	%g0,[$rp+64+28]
	b	.Ladd_done
	nop

.align	16
.Ladd_double:
	ldx	[%fp+STACK_BIAS-8],$rp_real
	mov	$ap_real,$ap
	b	.Lpoint_double_shortcut
	add	%sp,32*(12-4)+32,%sp	! difference in frame sizes

.align	16
.Ladd_proceed:
	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	$bp_real,64,$bp
	add	%sp,LOCALS+$res_z,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, res_z, in2_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	%sp,LOCALS+$U1,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, U1, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$Hcub,$bp
	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S1, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp
___
for($i=0;$i<96;$i+=8) {			# conditional moves
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
$code.=<<___;
.Ladd_done:
	ret
	restore
.type	ecp_nistz256_point_add,#function
.size	ecp_nistz256_point_add,.-ecp_nistz256_point_add
___
}

########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);
{
my ($res_x,$res_y,$res_z,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
my $Z1sqr = $S2;
# above map() describes stack layout with 10 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, result of check for zero and return pointer.
my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
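# (Editorial note: @ONE_mont is 1 in Montgomery representation, i.e.
# 2^256 mod p = 2^224-2^192-2^96+1, expressed as little-endian 32-bit
# words (1,0,0,0xffffffff,0xffffffff,0xffffffff,0xfffffffe,0); it stands
# in for the affine input's implicit z-coordinate in the conditional
# moves at the end of the routine.)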
my $bp_real=$rp_real;
$code.=<<___;
.globl	ecp_nistz256_point_add_affine
.align	32
ecp_nistz256_point_add_affine:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_affine_vis3
	nop

	save	%sp,-STACK_FRAME-32*10-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp
	mov	$ap,$ap_real
	mov	$bp,$bp_real

	ld	[$ap+64],$t0		! in1_z
	ld	[$ap+64+4],$t1
	ld	[$ap+64+8],$t2
	ld	[$ap+64+12],$t3
	ld	[$ap+64+16],$t4
	ld	[$ap+64+20],$t5
	ld	[$ap+64+24],$t6
	ld	[$ap+64+28],$t7
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0		! !in1infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-16]

	ld	[$bp],@acc[0]		! in2_x
	ld	[$bp+4],@acc[1]
	ld	[$bp+8],@acc[2]
	ld	[$bp+12],@acc[3]
	ld	[$bp+16],@acc[4]
	ld	[$bp+20],@acc[5]
	ld	[$bp+24],@acc[6]
	ld	[$bp+28],@acc[7]
	ld	[$bp+32],$t0		! in2_y
	ld	[$bp+32+4],$t1
	ld	[$bp+32+8],$t2
	ld	[$bp+32+12],$t3
	ld	[$bp+32+16],$t4
	ld	[$bp+32+20],$t5
	ld	[$bp+32+24],$t6
	ld	[$bp+32+28],$t7
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0
	or	@acc[0],$t0,$t0		! !in2infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-12]

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	$bp_real,0,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS+$U2,$rp

	add	$ap_real,0,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS+$H,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	$bp_real,32,$bp
	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,32,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS+$R,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	$ap_real,32,$bp
	add	%sp,LOCALS+$Hcub,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp
___
for($i=0;$i<64;$i+=8) {			# conditional moves
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
for(;$i<96;$i+=8) {
my $j=($i-64)/4;
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@ONE_mont[$j],@acc[0]
	movrz	$t1,@ONE_mont[$j+1],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
$code.=<<___;
	ret
	restore
.type	ecp_nistz256_point_add_affine,#function
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
___
}	}}}
{{{
my ($out,$inp,$index)=map("%i$_",(0..2));
my $mask="%o0";

$code.=<<___;
! void	ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
!				int %i2);
.globl	ecp_nistz256_scatter_w5
.align	32
ecp_nistz256_scatter_w5:
	save	%sp,-STACK_FRAME,%sp

	sll	$index,2,$index
	add	$out,$index,$out

	ld	[$inp],%l0		! X
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	add	$inp,32,$inp
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]
	add	$out,64*8,$out

	ld	[$inp],%l0		! Y
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	add	$inp,32,$inp
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]
	add	$out,64*8,$out

	ld	[$inp],%l0		! Z
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]

	ret
	restore
.type	ecp_nistz256_scatter_w5,#function
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5

! void	ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
!				int %i2);
.globl	ecp_nistz256_gather_w5
.align	32
ecp_nistz256_gather_w5:
	save	%sp,-STACK_FRAME,%sp

	neg	$index,$mask
	srax	$mask,63,$mask
	add	$index,$mask,$index
	sll	$index,2,$index
	add	$inp,$index,$inp
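
	! Illustrative note (editorial): $index is 1-based with 0 meaning
	! "return the point at infinity". The neg/srax pair above yields
	! $mask = ($index>0) ? -1 : 0, and the add rebases $index to
	! 0-based, so the masked loads below produce all-zero coordinates
	! for index 0 without any data-dependent branch.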
  1290. ld [$inp+64*0],%l0
  1291. ld [$inp+64*1],%l1
  1292. ld [$inp+64*2],%l2
  1293. ld [$inp+64*3],%l3
  1294. ld [$inp+64*4],%l4
  1295. ld [$inp+64*5],%l5
  1296. ld [$inp+64*6],%l6
  1297. ld [$inp+64*7],%l7
  1298. add $inp,64*8,$inp
  1299. and %l0,$mask,%l0
  1300. and %l1,$mask,%l1
  1301. st %l0,[$out] ! X
  1302. and %l2,$mask,%l2
  1303. st %l1,[$out+4]
  1304. and %l3,$mask,%l3
  1305. st %l2,[$out+8]
  1306. and %l4,$mask,%l4
  1307. st %l3,[$out+12]
  1308. and %l5,$mask,%l5
  1309. st %l4,[$out+16]
  1310. and %l6,$mask,%l6
  1311. st %l5,[$out+20]
  1312. and %l7,$mask,%l7
  1313. st %l6,[$out+24]
  1314. st %l7,[$out+28]
  1315. add $out,32,$out
  1316. ld [$inp+64*0],%l0
  1317. ld [$inp+64*1],%l1
  1318. ld [$inp+64*2],%l2
  1319. ld [$inp+64*3],%l3
  1320. ld [$inp+64*4],%l4
  1321. ld [$inp+64*5],%l5
  1322. ld [$inp+64*6],%l6
  1323. ld [$inp+64*7],%l7
  1324. add $inp,64*8,$inp
  1325. and %l0,$mask,%l0
  1326. and %l1,$mask,%l1
  1327. st %l0,[$out] ! Y
  1328. and %l2,$mask,%l2
  1329. st %l1,[$out+4]
  1330. and %l3,$mask,%l3
  1331. st %l2,[$out+8]
  1332. and %l4,$mask,%l4
  1333. st %l3,[$out+12]
  1334. and %l5,$mask,%l5
  1335. st %l4,[$out+16]
  1336. and %l6,$mask,%l6
  1337. st %l5,[$out+20]
  1338. and %l7,$mask,%l7
  1339. st %l6,[$out+24]
  1340. st %l7,[$out+28]
  1341. add $out,32,$out
  1342. ld [$inp+64*0],%l0
  1343. ld [$inp+64*1],%l1
  1344. ld [$inp+64*2],%l2
  1345. ld [$inp+64*3],%l3
  1346. ld [$inp+64*4],%l4
  1347. ld [$inp+64*5],%l5
  1348. ld [$inp+64*6],%l6
  1349. ld [$inp+64*7],%l7
  1350. and %l0,$mask,%l0
  1351. and %l1,$mask,%l1
  1352. st %l0,[$out] ! Z
  1353. and %l2,$mask,%l2
  1354. st %l1,[$out+4]
  1355. and %l3,$mask,%l3
  1356. st %l2,[$out+8]
  1357. and %l4,$mask,%l4
  1358. st %l3,[$out+12]
  1359. and %l5,$mask,%l5
  1360. st %l4,[$out+16]
  1361. and %l6,$mask,%l6
  1362. st %l5,[$out+20]
  1363. and %l7,$mask,%l7
  1364. st %l6,[$out+24]
  1365. st %l7,[$out+28]
  1366. ret
  1367. restore
  1368. .type ecp_nistz256_gather_w5,#function
  1369. .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
  1370. ! void ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
  1371. ! int %i2);
  1372. .globl ecp_nistz256_scatter_w7
  1373. .align 32
  1374. ecp_nistz256_scatter_w7:
  1375. save %sp,-STACK_FRAME,%sp
  1376. nop
  1377. add $out,$index,$out
  1378. mov 64/4,$index
  1379. .Loop_scatter_w7:
  1380. ld [$inp],%l0
  1381. add $inp,4,$inp
  1382. subcc $index,1,$index
  1383. stb %l0,[$out+64*0-1]
  1384. srl %l0,8,%l1
  1385. stb %l1,[$out+64*1-1]
  1386. srl %l0,16,%l2
  1387. stb %l2,[$out+64*2-1]
  1388. srl %l0,24,%l3
  1389. stb %l3,[$out+64*3-1]
  1390. bne .Loop_scatter_w7
  1391. add $out,64*4,$out
  1392. ret
  1393. restore
  1394. .type ecp_nistz256_scatter_w7,#function
  1395. .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
  1396. ! void ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
  1397. ! int %i2);
  1398. .globl ecp_nistz256_gather_w7
  1399. .align 32
  1400. ecp_nistz256_gather_w7:
  1401. save %sp,-STACK_FRAME,%sp
  1402. neg $index,$mask
  1403. srax $mask,63,$mask
  1404. add $index,$mask,$index
  1405. add $inp,$index,$inp
  1406. mov 64/4,$index
  1407. .Loop_gather_w7:
  1408. ldub [$inp+64*0],%l0
  1409. prefetch [$inp+3840+64*0],1
  1410. subcc $index,1,$index
  1411. ldub [$inp+64*1],%l1
  1412. prefetch [$inp+3840+64*1],1
  1413. ldub [$inp+64*2],%l2
  1414. prefetch [$inp+3840+64*2],1
  1415. ldub [$inp+64*3],%l3
  1416. prefetch [$inp+3840+64*3],1
  1417. add $inp,64*4,$inp
  1418. sll %l1,8,%l1
  1419. sll %l2,16,%l2
  1420. or %l0,%l1,%l0
  1421. sll %l3,24,%l3
  1422. or %l0,%l2,%l0
  1423. or %l0,%l3,%l0
  1424. and %l0,$mask,%l0
  1425. st %l0,[$out]
  1426. bne .Loop_gather_w7
  1427. add $out,4,$out
  1428. ret
  1429. restore
  1430. .type ecp_nistz256_gather_w7,#function
  1431. .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
  1432. ___
  1433. }}}
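
# A minimal Perl reference model of the constant-time w=5 gather above
# (illustrative only, never called by the generator; the name is made
# up). The assembly turns any non-zero index into an all-ones mask via
# neg+srax, pre-decrements the index by adding the mask, loads the
# table entry unconditionally and masks every loaded word, so that
# index==0 yields the all-zero point without a branch.
sub ref_gather_w5 {
	my ($table, $index) = @_;		# 16 entries of 24 32-bit words
	my $mask = $index ? 0xffffffff : 0;	# models neg+srax
	my $adj  = $index ? $index-1 : 0;	# models add $index,$mask,$index
	return [ map { $table->[$adj][$_] & $mask } (0..23) ];
}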
{{{
########################################################################
# The following subroutines are VIS3 counterparts of those above,
# implementing the same primitives found in ecp_nistz256.c. The key
# difference is that they use 128-bit multiplication and addition with
# 64-bit carry, and in order to do that they convert the input from
# uint32_t[8] to uint64_t[4] upon entry and back on return.
#
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");
my ($acc6,$acc7)=($bp,$bi);	# used in squaring
$code.=<<___;
.align	32
__ecp_nistz256_mul_by_2_vis3:
	addcc	$acc0,$acc0,$acc0
	addxccc	$acc1,$acc1,$acc1
	addxccc	$acc2,$acc2,$acc2
	addxccc	$acc3,$acc3,$acc3
	b	.Lreduce_by_sub_vis3
	addxc	%g0,%g0,$acc4		! did it carry?
.type	__ecp_nistz256_mul_by_2_vis3,#function
.size	__ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3

.align	32
__ecp_nistz256_add_vis3:
	ldx	[$bp+0],$t0
	ldx	[$bp+8],$t1
	ldx	[$bp+16],$t2
	ldx	[$bp+24],$t3

__ecp_nistz256_add_noload_vis3:

	addcc	$t0,$acc0,$acc0
	addxccc	$t1,$acc1,$acc1
	addxccc	$t2,$acc2,$acc2
	addxccc	$t3,$acc3,$acc3
	addxc	%g0,%g0,$acc4		! did it carry?

.Lreduce_by_sub_vis3:
	addcc	$acc0,1,$t0		! add -modulus, i.e. subtract
	addxccc	$acc1,$poly1,$t1
	addxccc	$acc2,$minus1,$t2
	addxccc	$acc3,$poly3,$t3
	addxc	$acc4,$minus1,$acc4

	movrz	$acc4,$t0,$acc0		! ret = borrow ? ret : ret-modulus
	movrz	$acc4,$t1,$acc1
	stx	$acc0,[$rp]
	movrz	$acc4,$t2,$acc2
	stx	$acc1,[$rp+8]
	movrz	$acc4,$t3,$acc3
	stx	$acc2,[$rp+16]
	retl
	stx	$acc3,[$rp+24]
.type	__ecp_nistz256_add_vis3,#function
.size	__ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
! The trouble with subtraction is that there is no subtract with 64-bit
! borrow, only one with 32-bit borrow. For this reason we "decompose"
! the 64-bit $acc0-$acc3 into 32-bit values and pick up b[4] in 32-bit
! pieces. But recall that SPARC is big-endian, which is why b[4] is
! accessed in the order 4-0-12-8-20-16-28-24. And prior to reduction
! we "collect" the result back into the 64-bit $acc0-$acc3.
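! For example, for the least significant limb: the low 32 bits of
! $acc0 are matched with the word at [$bp+4] and the high 32 bits
! with the word at [$bp+0], the two subtractions being chained
! through the 32-bit borrow, hence the 4-0-12-8-... access order.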
.align	32
__ecp_nistz256_sub_from_vis3:
	ld	[$bp+4],$t0
	ld	[$bp+0],$t1
	ld	[$bp+12],$t2
	ld	[$bp+8],$t3

	srlx	$acc0,32,$acc4
	not	$poly1,$poly1
	srlx	$acc1,32,$acc5
	subcc	$acc0,$t0,$acc0
	ld	[$bp+20],$t0
	subccc	$acc4,$t1,$acc4
	ld	[$bp+16],$t1
	subccc	$acc1,$t2,$acc1
	ld	[$bp+28],$t2
	and	$acc0,$poly1,$acc0
	subccc	$acc5,$t3,$acc5
	ld	[$bp+24],$t3
	sllx	$acc4,32,$acc4
	and	$acc1,$poly1,$acc1
	sllx	$acc5,32,$acc5
	or	$acc0,$acc4,$acc0
	srlx	$acc2,32,$acc4
	or	$acc1,$acc5,$acc1
	srlx	$acc3,32,$acc5
	subccc	$acc2,$t0,$acc2
	subccc	$acc4,$t1,$acc4
	subccc	$acc3,$t2,$acc3
	and	$acc2,$poly1,$acc2
	subccc	$acc5,$t3,$acc5
	sllx	$acc4,32,$acc4
	and	$acc3,$poly1,$acc3
	sllx	$acc5,32,$acc5
	or	$acc2,$acc4,$acc2
	subc	%g0,%g0,$acc4		! did it borrow?
	b	.Lreduce_by_add_vis3
	or	$acc3,$acc5,$acc3
.type	__ecp_nistz256_sub_from_vis3,#function
.size	__ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3

.align	32
__ecp_nistz256_sub_morf_vis3:
	ld	[$bp+4],$t0
	ld	[$bp+0],$t1
	ld	[$bp+12],$t2
	ld	[$bp+8],$t3

	srlx	$acc0,32,$acc4
	not	$poly1,$poly1
	srlx	$acc1,32,$acc5
	subcc	$t0,$acc0,$acc0
	ld	[$bp+20],$t0
	subccc	$t1,$acc4,$acc4
	ld	[$bp+16],$t1
	subccc	$t2,$acc1,$acc1
	ld	[$bp+28],$t2
	and	$acc0,$poly1,$acc0
	subccc	$t3,$acc5,$acc5
	ld	[$bp+24],$t3
	sllx	$acc4,32,$acc4
	and	$acc1,$poly1,$acc1
	sllx	$acc5,32,$acc5
	or	$acc0,$acc4,$acc0
	srlx	$acc2,32,$acc4
	or	$acc1,$acc5,$acc1
	srlx	$acc3,32,$acc5
	subccc	$t0,$acc2,$acc2
	subccc	$t1,$acc4,$acc4
	subccc	$t2,$acc3,$acc3
	and	$acc2,$poly1,$acc2
	subccc	$t3,$acc5,$acc5
	sllx	$acc4,32,$acc4
	and	$acc3,$poly1,$acc3
	sllx	$acc5,32,$acc5
	or	$acc2,$acc4,$acc2
	subc	%g0,%g0,$acc4		! did it borrow?
	or	$acc3,$acc5,$acc3

.Lreduce_by_add_vis3:
	addcc	$acc0,-1,$t0		! add modulus
	not	$poly3,$t3
	addxccc	$acc1,$poly1,$t1
	not	$poly1,$poly1		! restore $poly1
	addxccc	$acc2,%g0,$t2
	addxc	$acc3,$t3,$t3

	movrnz	$acc4,$t0,$acc0		! if a-b borrowed, ret = ret+mod
	movrnz	$acc4,$t1,$acc1
	stx	$acc0,[$rp]
	movrnz	$acc4,$t2,$acc2
	stx	$acc1,[$rp+8]
	movrnz	$acc4,$t3,$acc3
	stx	$acc2,[$rp+16]
	retl
	stx	$acc3,[$rp+24]
.type	__ecp_nistz256_sub_morf_vis3,#function
.size	__ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3

.align	32
__ecp_nistz256_div_by_2_vis3:
	! ret = (a is odd ? a+mod : a) >> 1

	not	$poly1,$t1
	not	$poly3,$t3
	and	$acc0,1,$acc5
	addcc	$acc0,-1,$t0		! add modulus
	addxccc	$acc1,$t1,$t1
	addxccc	$acc2,%g0,$t2
	addxccc	$acc3,$t3,$t3
	addxc	%g0,%g0,$acc4		! carry bit

	movrnz	$acc5,$t0,$acc0
	movrnz	$acc5,$t1,$acc1
	movrnz	$acc5,$t2,$acc2
	movrnz	$acc5,$t3,$acc3
	movrz	$acc5,%g0,$acc4

	! ret >>= 1
	srlx	$acc0,1,$acc0
	sllx	$acc1,63,$t0
	srlx	$acc1,1,$acc1
	or	$acc0,$t0,$acc0
	sllx	$acc2,63,$t1
	srlx	$acc2,1,$acc2
	or	$acc1,$t1,$acc1
	sllx	$acc3,63,$t2
	stx	$acc0,[$rp]
	srlx	$acc3,1,$acc3
	or	$acc2,$t2,$acc2
	sllx	$acc4,63,$t3		! don't forget carry bit
	stx	$acc1,[$rp+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[$rp+16]
	retl
	stx	$acc3,[$rp+24]
.type	__ecp_nistz256_div_by_2_vis3,#function
.size	__ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
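
! A note on the carry bit above (illustrative): a+mod does not fit in
! 256 bits whenever a >= 2^256-mod, so the 257th bit is kept in $acc4
! and shifted back in as bit 63 of the top limb after the right shift.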
! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
! 4x faster [on T4]...
.align	32
__ecp_nistz256_mul_mont_vis3:
	mulx	$a0,$bi,$acc0
	not	$poly3,$poly3		! 0xFFFFFFFF00000001
	umulxhi	$a0,$bi,$t0
	mulx	$a1,$bi,$acc1
	umulxhi	$a1,$bi,$t1
	mulx	$a2,$bi,$acc2
	umulxhi	$a2,$bi,$t2
	mulx	$a3,$bi,$acc3
	umulxhi	$a3,$bi,$t3
	ldx	[$bp+8],$bi		! b[1]

	addcc	$acc1,$t0,$acc1		! accumulate high parts of multiplication
	sllx	$acc0,32,$t0
	addxccc	$acc2,$t1,$acc2
	srlx	$acc0,32,$t1
	addxccc	$acc3,$t2,$acc3
	addxc	%g0,$t3,$acc4
	mov	0,$acc5
___
for($i=1;$i<4;$i++) {
	# Reduction iteration is normally performed by accumulating
	# result of multiplication of modulus by "magic" digit [and
	# omitting least significant word, which is guaranteed to
	# be 0], but thanks to special form of modulus and "magic"
	# digit being equal to least significant word, it can be
	# performed with additions and subtractions alone. Indeed:
	#
	#            ffff0001.00000000.0000ffff.ffffffff
	#          * abcdefgh
	#          + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
	# rewrite above as:
	#
	#            xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
	#          + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
	#          - 0000abcd.efgh0000.00000000.00000000.abcdefgh
	#
	# or marking redundant operations:
	#
	#            xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
	#          + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
	#          - 0000abcd.efgh0000.--------.--------.--------
	#            ^^^^^^^^ but this word is calculated with umulxhi,
	#                     because there is no subtract with 64-bit
	#                     borrow:-(

$code.=<<___;
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	mulx	$a0,$bi,$t0
	addxccc	$acc2,$t1,$acc1
	mulx	$a1,$bi,$t1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	mulx	$a2,$bi,$t2
	addxccc	$acc4,$t3,$acc3
	mulx	$a3,$bi,$t3
	addxc	$acc5,%g0,$acc4

	addcc	$acc0,$t0,$acc0		! accumulate low parts of multiplication
	umulxhi	$a0,$bi,$t0
	addxccc	$acc1,$t1,$acc1
	umulxhi	$a1,$bi,$t1
	addxccc	$acc2,$t2,$acc2
	umulxhi	$a2,$bi,$t2
	addxccc	$acc3,$t3,$acc3
	umulxhi	$a3,$bi,$t3
	addxc	$acc4,%g0,$acc4
___
$code.=<<___	if ($i<3);
	ldx	[$bp+8*($i+1)],$bi	! bp[$i+1]
___
$code.=<<___;
	addcc	$acc1,$t0,$acc1		! accumulate high parts of multiplication
	sllx	$acc0,32,$t0
	addxccc	$acc2,$t1,$acc2
	srlx	$acc0,32,$t1
	addxccc	$acc3,$t2,$acc3
	addxccc	$acc4,$t3,$acc4
	addxc	%g0,%g0,$acc5
___
}
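
# Hypothetical self-check (never invoked by the generator) of the
# identity the commentary above relies on:
#   acc0*mod == acc0*0xffffffff00000001*2^192 + acc0*2^96 - acc0,
# i.e. one reduction step needs only shifted additions and a single
# 64x64-bit multiplication by 0xffffffff00000001.
sub ref_check_reduction_identity {
	use Math::BigInt;
	my $acc0 = Math::BigInt->new(shift);
	my $mod  = Math::BigInt->new("0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff");
	my $lhs  = $acc0->copy->bmul($mod);
	my $rhs  = $acc0->copy->bmul(Math::BigInt->new("0xffffffff00000001"))->blsft(192);
	$rhs->badd($acc0->copy->blsft(96))->bsub($acc0);
	return $lhs->bcmp($rhs) == 0;	# holds for any acc0
}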
$code.=<<___;
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	addxccc	$acc4,$t3,$acc3
	b	.Lmul_final_vis3	! see below
	addxc	$acc5,%g0,$acc4
.type	__ecp_nistz256_mul_mont_vis3,#function
.size	__ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3

! compared to above __ecp_nistz256_mul_mont_vis3 it takes 21% fewer
! instructions, but is only 14% faster [on T4]...
.align	32
__ecp_nistz256_sqr_mont_vis3:
	! |  |  |  |  |  |a1*a0|  |
	! |  |  |  |  |a2*a0|  |  |
	! |  |a3*a2|a3*a0|  |  |  |
	! |  |  |  |a2*a1|  |  |  |
	! |  |  |a3*a1|  |  |  |  |
	! *|  |  |  |  |  |  |  | 2|
	! +|a3*a3|a2*a2|a1*a1|a0*a0|
	! |--+--+--+--+--+--+--+--|
	! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
	!
	! The "can't overflow" comments below mark carries into the high
	! part of a multiplication result, which cannot overflow, because
	! the high part can never be all ones.
	mulx	$a1,$a0,$acc1		! a[1]*a[0]
	umulxhi	$a1,$a0,$t1
	mulx	$a2,$a0,$acc2		! a[2]*a[0]
	umulxhi	$a2,$a0,$t2
	mulx	$a3,$a0,$acc3		! a[3]*a[0]
	umulxhi	$a3,$a0,$acc4

	addcc	$acc2,$t1,$acc2		! accumulate high parts of multiplication
	mulx	$a2,$a1,$t0		! a[2]*a[1]
	umulxhi	$a2,$a1,$t1
	addxccc	$acc3,$t2,$acc3
	mulx	$a3,$a1,$t2		! a[3]*a[1]
	umulxhi	$a3,$a1,$t3
	addxc	$acc4,%g0,$acc4		! can't overflow

	mulx	$a3,$a2,$acc5		! a[3]*a[2]
	not	$poly3,$poly3		! 0xFFFFFFFF00000001
	umulxhi	$a3,$a2,$acc6

	addcc	$t2,$t1,$t1		! accumulate high parts of multiplication
	mulx	$a0,$a0,$acc0		! a[0]*a[0]
	addxc	$t3,%g0,$t2		! can't overflow

	addcc	$acc3,$t0,$acc3		! accumulate low parts of multiplication
	umulxhi	$a0,$a0,$a0
	addxccc	$acc4,$t1,$acc4
	mulx	$a1,$a1,$t1		! a[1]*a[1]
	addxccc	$acc5,$t2,$acc5
	umulxhi	$a1,$a1,$a1
	addxc	$acc6,%g0,$acc6		! can't overflow

	addcc	$acc1,$acc1,$acc1	! acc[1-6]*=2
	mulx	$a2,$a2,$t2		! a[2]*a[2]
	addxccc	$acc2,$acc2,$acc2
	umulxhi	$a2,$a2,$a2
	addxccc	$acc3,$acc3,$acc3
	mulx	$a3,$a3,$t3		! a[3]*a[3]
	addxccc	$acc4,$acc4,$acc4
	umulxhi	$a3,$a3,$a3
	addxccc	$acc5,$acc5,$acc5
	addxccc	$acc6,$acc6,$acc6
	addxc	%g0,%g0,$acc7

	addcc	$acc1,$a0,$acc1		! +a[i]*a[i]
	addxccc	$acc2,$t1,$acc2
	addxccc	$acc3,$a1,$acc3
	addxccc	$acc4,$t2,$acc4
	sllx	$acc0,32,$t0
	addxccc	$acc5,$a2,$acc5
	srlx	$acc0,32,$t1
	addxccc	$acc6,$t3,$acc6
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	addxc	$acc7,$a3,$acc7
___
for($i=0;$i<3;$i++) {			# reductions, see commentary
					# in multiplication for details
$code.=<<___;
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	sllx	$acc0,32,$t0
	addxccc	$acc2,$t1,$acc1
	srlx	$acc0,32,$t1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	addxc	%g0,$t3,$acc3		! can't overflow
___
}
$code.=<<___;
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	addxc	%g0,$t3,$acc3		! can't overflow

	addcc	$acc0,$acc4,$acc0	! accumulate upper half
	addxccc	$acc1,$acc5,$acc1
	addxccc	$acc2,$acc6,$acc2
	addxccc	$acc3,$acc7,$acc3
	addxc	%g0,%g0,$acc4

.Lmul_final_vis3:
	! The final step is "if result > mod, subtract mod", but since
	! comparison means subtraction, we do the subtraction and copy
	! the outcome only if it didn't borrow. Note that since we [have
	! to] replace subtraction with addition of the negated modulus,
	! the carry/borrow logic is inverted.
	addcc	$acc0,1,$t0		! add -modulus, i.e. subtract
	not	$poly3,$poly3		! restore 0x00000000FFFFFFFE
	addxccc	$acc1,$poly1,$t1
	addxccc	$acc2,$minus1,$t2
	addxccc	$acc3,$poly3,$t3
	addxccc	$acc4,$minus1,%g0	! did it carry?

	movcs	%xcc,$t0,$acc0
	movcs	%xcc,$t1,$acc1
	stx	$acc0,[$rp]
	movcs	%xcc,$t2,$acc2
	stx	$acc1,[$rp+8]
	movcs	%xcc,$t3,$acc3
	stx	$acc2,[$rp+16]
	retl
	stx	$acc3,[$rp+24]
.type	__ecp_nistz256_sqr_mont_vis3,#function
.size	__ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
___

########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
#
{
my ($res_x,$res_y,$res_z,
    $in_x,$in_y,$in_z,
    $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
# above map() describes stack layout with 10 temporary
# 256-bit vectors on top.
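
# Illustrative sketch (assuming a 64-bit perl; never called by the
# generator) of the ld/sllx/or idiom employed below to repack the
# uint32_t[8] input as four 64-bit limbs, least significant word first:
sub ref_words32_to_limbs64 {
	my @w = @_;		# eight 32-bit words, w[0] least significant
	return map { $w[2*$_] | ($w[2*$_+1] << 32) } (0..3);
}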
$code.=<<___;
.align	32
ecp_nistz256_point_double_vis3:
	save	%sp,-STACK64_FRAME-32*10,%sp

	mov	$rp,$rp_real
.Ldouble_shortcut_vis3:
	mov	-1,$minus1
	mov	-2,$poly3
	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$ap],$a0		! in_x
	ld	[$ap+4],$t0
	ld	[$ap+8],$a1
	ld	[$ap+12],$t1
	ld	[$ap+16],$a2
	ld	[$ap+20],$t2
	ld	[$ap+24],$a3
	ld	[$ap+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	ld	[$ap+32],$acc0		! in_y
	or	$a0,$t0,$a0
	ld	[$ap+32+4],$t0
	sllx	$t2,32,$t2
	ld	[$ap+32+8],$acc1
	or	$a1,$t1,$a1
	ld	[$ap+32+12],$t1
	sllx	$t3,32,$t3
	ld	[$ap+32+16],$acc2
	or	$a2,$t2,$a2
	ld	[$ap+32+20],$t2
	or	$a3,$t3,$a3
	ld	[$ap+32+24],$acc3
	sllx	$t0,32,$t0
	ld	[$ap+32+28],$t3
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in_x]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in_x+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in_x+16]
	or	$acc0,$t0,$acc0
	stx	$a3,[%sp+LOCALS64+$in_x+24]
	or	$acc1,$t1,$acc1
	stx	$acc0,[%sp+LOCALS64+$in_y]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in_y+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in_y+16]
	stx	$acc3,[%sp+LOCALS64+$in_y+24]

	ld	[$ap+64],$a0		! in_z
	ld	[$ap+64+4],$t0
	ld	[$ap+64+8],$a1
	ld	[$ap+64+12],$t1
	ld	[$ap+64+16],$a2
	ld	[$ap+64+20],$t2
	ld	[$ap+64+24],$a3
	ld	[$ap+64+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	or	$a0,$t0,$a0
	sllx	$t2,32,$t2
	or	$a1,$t1,$a1
	sllx	$t3,32,$t3
	or	$a2,$t2,$a2
	or	$a3,$t3,$a3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in_z]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in_z+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in_z+16]
	stx	$a3,[%sp+LOCALS64+$in_z+24]

	! in_y is still in $acc0-$acc3
	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(S, in_y);
	add	%sp,LOCALS64+$S,$rp

	! in_z is still in $a0-$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Zsqr, in_z);
	add	%sp,LOCALS64+$Zsqr,$rp

	mov	$acc0,$a0		! put Zsqr aside
	mov	$acc1,$a1
	mov	$acc2,$a2
	mov	$acc3,$a3

	add	%sp,LOCALS64+$in_x,$bp
	call	__ecp_nistz256_add_vis3	! p256_add(M, Zsqr, in_x);
	add	%sp,LOCALS64+$M,$rp

	mov	$a0,$acc0		! restore Zsqr
	ldx	[%sp+LOCALS64+$S],$a0	! forward load
	mov	$a1,$acc1
	ldx	[%sp+LOCALS64+$S+8],$a1
	mov	$a2,$acc2
	ldx	[%sp+LOCALS64+$S+16],$a2
	mov	$a3,$acc3
	ldx	[%sp+LOCALS64+$S+24],$a3

	add	%sp,LOCALS64+$in_x,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(Zsqr, in_x, Zsqr);
	add	%sp,LOCALS64+$Zsqr,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(S, S);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$in_z],$bi
	ldx	[%sp+LOCALS64+$in_y],$a0
	ldx	[%sp+LOCALS64+$in_y+8],$a1
	ldx	[%sp+LOCALS64+$in_y+16],$a2
	ldx	[%sp+LOCALS64+$in_y+24],$a3
	add	%sp,LOCALS64+$in_z,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(tmp0, in_z, in_y);
	add	%sp,LOCALS64+$tmp0,$rp

	ldx	[%sp+LOCALS64+$M],$bi	! forward load
	ldx	[%sp+LOCALS64+$Zsqr],$a0
	ldx	[%sp+LOCALS64+$Zsqr+8],$a1
	ldx	[%sp+LOCALS64+$Zsqr+16],$a2
	ldx	[%sp+LOCALS64+$Zsqr+24],$a3

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(res_z, tmp0);
	add	%sp,LOCALS64+$res_z,$rp

	add	%sp,LOCALS64+$M,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(M, M, Zsqr);
	add	%sp,LOCALS64+$M,$rp

	mov	$acc0,$a0		! put aside M
	mov	$acc1,$a1
	mov	$acc2,$a2
	mov	$acc3,$a3
	call	__ecp_nistz256_mul_by_2_vis3
	add	%sp,LOCALS64+$M,$rp
	mov	$a0,$t0			! copy M
	ldx	[%sp+LOCALS64+$S],$a0	! forward load
	mov	$a1,$t1
	ldx	[%sp+LOCALS64+$S+8],$a1
	mov	$a2,$t2
	ldx	[%sp+LOCALS64+$S+16],$a2
	mov	$a3,$t3
	ldx	[%sp+LOCALS64+$S+24],$a3
	call	__ecp_nistz256_add_noload_vis3	! p256_mul_by_3(M, M);
	add	%sp,LOCALS64+$M,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(tmp0, S);
	add	%sp,LOCALS64+$tmp0,$rp

	ldx	[%sp+LOCALS64+$S],$bi	! forward load
	ldx	[%sp+LOCALS64+$in_x],$a0
	ldx	[%sp+LOCALS64+$in_x+8],$a1
	ldx	[%sp+LOCALS64+$in_x+16],$a2
	ldx	[%sp+LOCALS64+$in_x+24],$a3

	call	__ecp_nistz256_div_by_2_vis3	! p256_div_by_2(res_y, tmp0);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S, S, in_x);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$M],$a0	! forward load
	ldx	[%sp+LOCALS64+$M+8],$a1
	ldx	[%sp+LOCALS64+$M+16],$a2
	ldx	[%sp+LOCALS64+$M+24],$a3

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(tmp0, S);
	add	%sp,LOCALS64+$tmp0,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(res_x, M);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$tmp0,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, tmp0);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$M],$a0	! forward load
	ldx	[%sp+LOCALS64+$M+8],$a1
	ldx	[%sp+LOCALS64+$M+16],$a2
	ldx	[%sp+LOCALS64+$M+24],$a3

	add	%sp,LOCALS64+$S,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(S, S, res_x);
	add	%sp,LOCALS64+$S,$rp

	mov	$acc0,$bi
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S, S, M);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$res_x],$a0	! forward load
	ldx	[%sp+LOCALS64+$res_x+8],$a1
	ldx	[%sp+LOCALS64+$res_x+16],$a2
	ldx	[%sp+LOCALS64+$res_x+24],$a3

	add	%sp,LOCALS64+$res_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, S, res_y);
	add	%sp,LOCALS64+$res_y,$rp

	! convert output to uint32_t[8]
	srlx	$a0,32,$t0
	srlx	$a1,32,$t1
	st	$a0,[$rp_real]		! res_x
	srlx	$a2,32,$t2
	st	$t0,[$rp_real+4]
	srlx	$a3,32,$t3
	st	$a1,[$rp_real+8]
	st	$t1,[$rp_real+12]
	st	$a2,[$rp_real+16]
	st	$t2,[$rp_real+20]
	st	$a3,[$rp_real+24]
	st	$t3,[$rp_real+28]

	ldx	[%sp+LOCALS64+$res_z],$a0	! forward load
	srlx	$acc0,32,$t0
	ldx	[%sp+LOCALS64+$res_z+8],$a1
	srlx	$acc1,32,$t1
	ldx	[%sp+LOCALS64+$res_z+16],$a2
	srlx	$acc2,32,$t2
	ldx	[%sp+LOCALS64+$res_z+24],$a3
	srlx	$acc3,32,$t3
	st	$acc0,[$rp_real+32]	! res_y
	st	$t0,[$rp_real+32+4]
	st	$acc1,[$rp_real+32+8]
	st	$t1,[$rp_real+32+12]
	st	$acc2,[$rp_real+32+16]
	st	$t2,[$rp_real+32+20]
	st	$acc3,[$rp_real+32+24]
	st	$t3,[$rp_real+32+28]

	srlx	$a0,32,$t0
	srlx	$a1,32,$t1
	st	$a0,[$rp_real+64]	! res_z
	srlx	$a2,32,$t2
	st	$t0,[$rp_real+64+4]
	srlx	$a3,32,$t3
	st	$a1,[$rp_real+64+8]
	st	$t1,[$rp_real+64+12]
	st	$a2,[$rp_real+64+16]
	st	$t2,[$rp_real+64+20]
	st	$a3,[$rp_real+64+24]
	st	$t3,[$rp_real+64+28]

	ret
	restore
.type	ecp_nistz256_point_double_vis3,#function
.size	ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
___
}
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);
{
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);

# above map() describes stack layout with 18 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty and result of check for zero.
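
# The infinity flags below are computed branch-free: all eight words
# of the z-coordinate are or-ed together and movrnz converts any
# non-zero value into an all-ones mask. A Perl rendition of the idiom
# (illustrative only, never called by the generator):
sub ref_nonzero_mask {
	my $acc = 0;
	$acc |= $_ for @_;
	return $acc ? -1 : 0;	# models movrnz $acc0,-1,$acc0
}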
$code.=<<___;
.globl	ecp_nistz256_point_add_vis3
.align	32
ecp_nistz256_point_add_vis3:
	save	%sp,-STACK64_FRAME-32*18-32,%sp

	mov	$rp,$rp_real
	mov	-1,$minus1
	mov	-2,$poly3
	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$bp],$a0		! in2_x
	ld	[$bp+4],$t0
	ld	[$bp+8],$a1
	ld	[$bp+12],$t1
	ld	[$bp+16],$a2
	ld	[$bp+20],$t2
	ld	[$bp+24],$a3
	ld	[$bp+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	ld	[$bp+32],$acc0		! in2_y
	or	$a0,$t0,$a0
	ld	[$bp+32+4],$t0
	sllx	$t2,32,$t2
	ld	[$bp+32+8],$acc1
	or	$a1,$t1,$a1
	ld	[$bp+32+12],$t1
	sllx	$t3,32,$t3
	ld	[$bp+32+16],$acc2
	or	$a2,$t2,$a2
	ld	[$bp+32+20],$t2
	or	$a3,$t3,$a3
	ld	[$bp+32+24],$acc3
	sllx	$t0,32,$t0
	ld	[$bp+32+28],$t3
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in2_x]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in2_x+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in2_x+16]
	or	$acc0,$t0,$acc0
	stx	$a3,[%sp+LOCALS64+$in2_x+24]
	or	$acc1,$t1,$acc1
	stx	$acc0,[%sp+LOCALS64+$in2_y]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in2_y+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in2_y+16]
	stx	$acc3,[%sp+LOCALS64+$in2_y+24]

	ld	[$bp+64],$acc0		! in2_z
	ld	[$bp+64+4],$t0
	ld	[$bp+64+8],$acc1
	ld	[$bp+64+12],$t1
	ld	[$bp+64+16],$acc2
	ld	[$bp+64+20],$t2
	ld	[$bp+64+24],$acc3
	ld	[$bp+64+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	ld	[$ap],$a0		! in1_x
	or	$acc0,$t0,$acc0
	ld	[$ap+4],$t0
	sllx	$t2,32,$t2
	ld	[$ap+8],$a1
	or	$acc1,$t1,$acc1
	ld	[$ap+12],$t1
	sllx	$t3,32,$t3
	ld	[$ap+16],$a2
	or	$acc2,$t2,$acc2
	ld	[$ap+20],$t2
	or	$acc3,$t3,$acc3
	ld	[$ap+24],$a3
	sllx	$t0,32,$t0
	ld	[$ap+28],$t3
	sllx	$t1,32,$t1
	stx	$acc0,[%sp+LOCALS64+$in2_z]
	sllx	$t2,32,$t2
	stx	$acc1,[%sp+LOCALS64+$in2_z+8]
	sllx	$t3,32,$t3
	stx	$acc2,[%sp+LOCALS64+$in2_z+16]
	stx	$acc3,[%sp+LOCALS64+$in2_z+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$acc0,-1,$acc0		! !in2infty
	stx	$acc0,[%fp+STACK_BIAS-8]

	or	$a0,$t0,$a0
	ld	[$ap+32],$acc0		! in1_y
	or	$a1,$t1,$a1
	ld	[$ap+32+4],$t0
	or	$a2,$t2,$a2
	ld	[$ap+32+8],$acc1
	or	$a3,$t3,$a3
	ld	[$ap+32+12],$t1
	ld	[$ap+32+16],$acc2
	ld	[$ap+32+20],$t2
	ld	[$ap+32+24],$acc3
	sllx	$t0,32,$t0
	ld	[$ap+32+28],$t3
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in1_x]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in1_x+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in1_x+16]
	or	$acc0,$t0,$acc0
	stx	$a3,[%sp+LOCALS64+$in1_x+24]
	or	$acc1,$t1,$acc1
	stx	$acc0,[%sp+LOCALS64+$in1_y]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in1_y+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in1_y+16]
	stx	$acc3,[%sp+LOCALS64+$in1_y+24]

	ldx	[%sp+LOCALS64+$in2_z],$a0	! forward load
	ldx	[%sp+LOCALS64+$in2_z+8],$a1
	ldx	[%sp+LOCALS64+$in2_z+16],$a2
	ldx	[%sp+LOCALS64+$in2_z+24],$a3

	ld	[$ap+64],$acc0		! in1_z
	ld	[$ap+64+4],$t0
	ld	[$ap+64+8],$acc1
	ld	[$ap+64+12],$t1
	ld	[$ap+64+16],$acc2
	ld	[$ap+64+20],$t2
	ld	[$ap+64+24],$acc3
	ld	[$ap+64+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	or	$acc0,$t0,$acc0
	sllx	$t2,32,$t2
	or	$acc1,$t1,$acc1
	sllx	$t3,32,$t3
	stx	$acc0,[%sp+LOCALS64+$in1_z]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in1_z+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in1_z+16]
	stx	$acc3,[%sp+LOCALS64+$in1_z+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$acc0,-1,$acc0		! !in1infty
	stx	$acc0,[%fp+STACK_BIAS-16]

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z2sqr, in2_z);
	add	%sp,LOCALS64+$Z2sqr,$rp

	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS64+$Z1sqr,$rp

	ldx	[%sp+LOCALS64+$Z2sqr],$bi
	ldx	[%sp+LOCALS64+$in2_z],$a0
	ldx	[%sp+LOCALS64+$in2_z+8],$a1
	ldx	[%sp+LOCALS64+$in2_z+16],$a2
	ldx	[%sp+LOCALS64+$in2_z+24],$a3
	add	%sp,LOCALS64+$Z2sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S1, Z2sqr, in2_z);
	add	%sp,LOCALS64+$S1,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$S1],$bi
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3
	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S1, S1, in1_y);
	add	%sp,LOCALS64+$S1,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$Z2sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3

	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, S1);
	add	%sp,LOCALS64+$R,$rp

	or	$acc1,$acc0,$acc0	! see if result is zero
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	stx	$acc0,[%fp+STACK_BIAS-24]

	add	%sp,LOCALS64+$Z2sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U1, in1_x, Z2sqr);
	add	%sp,LOCALS64+$U1,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi
	ldx	[%sp+LOCALS64+$in2_x],$a0
	ldx	[%sp+LOCALS64+$in2_x+8],$a1
	ldx	[%sp+LOCALS64+$in2_x+16],$a2
	ldx	[%sp+LOCALS64+$in2_x+24],$a3
	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in2_x, Z1sqr);
	add	%sp,LOCALS64+$U2,$rp

	ldx	[%sp+LOCALS64+$R],$a0	! forward load
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3

	add	%sp,LOCALS64+$U1,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, U1);
	add	%sp,LOCALS64+$H,$rp

	or	$acc1,$acc0,$acc0	! see if result is zero
	or	$acc3,$acc2,$acc2
	orcc	$acc2,$acc0,$acc0

	bne,pt	%xcc,.Ladd_proceed_vis3	! is_equal(U1,U2)?
	nop

	ldx	[%fp+STACK_BIAS-8],$t0
	ldx	[%fp+STACK_BIAS-16],$t1
	ldx	[%fp+STACK_BIAS-24],$t2
	andcc	$t0,$t1,%g0
	be,pt	%xcc,.Ladd_proceed_vis3	! (in1infty || in2infty)?
	nop
	andcc	$t2,$t2,%g0
	be,a,pt	%xcc,.Ldouble_shortcut_vis3	! is_equal(S1,S2)?
	add	%sp,32*(12-10)+32,%sp	! difference in frame sizes

	st	%g0,[$rp_real]
	st	%g0,[$rp_real+4]
	st	%g0,[$rp_real+8]
	st	%g0,[$rp_real+12]
	st	%g0,[$rp_real+16]
	st	%g0,[$rp_real+20]
	st	%g0,[$rp_real+24]
	st	%g0,[$rp_real+28]
	st	%g0,[$rp_real+32]
	st	%g0,[$rp_real+32+4]
	st	%g0,[$rp_real+32+8]
	st	%g0,[$rp_real+32+12]
	st	%g0,[$rp_real+32+16]
	st	%g0,[$rp_real+32+20]
	st	%g0,[$rp_real+32+24]
	st	%g0,[$rp_real+32+28]
	st	%g0,[$rp_real+64]
	st	%g0,[$rp_real+64+4]
	st	%g0,[$rp_real+64+8]
	st	%g0,[$rp_real+64+12]
	st	%g0,[$rp_real+64+16]
	st	%g0,[$rp_real+64+20]
	st	%g0,[$rp_real+64+24]
	st	%g0,[$rp_real+64+28]
	b	.Ladd_done_vis3
	nop

.align	16
.Ladd_proceed_vis3:
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS64+$Rsqr,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$H],$a0
	ldx	[%sp+LOCALS64+$H+8],$a1
	ldx	[%sp+LOCALS64+$H+16],$a2
	ldx	[%sp+LOCALS64+$H+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS64+$Hsqr,$rp

	ldx	[%sp+LOCALS64+$res_z],$bi
	ldx	[%sp+LOCALS64+$in2_z],$a0
	ldx	[%sp+LOCALS64+$in2_z+8],$a1
	ldx	[%sp+LOCALS64+$in2_z+16],$a2
	ldx	[%sp+LOCALS64+$in2_z+24],$a3
	add	%sp,LOCALS64+$res_z,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, res_z, in2_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS64+$Hcub,$rp

	ldx	[%sp+LOCALS64+$U1],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$U1,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, U1, Hsqr);
	add	%sp,LOCALS64+$U2,$rp

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS64+$Hsqr,$rp

	add	%sp,LOCALS64+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$S1],$bi	! forward load
	ldx	[%sp+LOCALS64+$Hcub],$a0
	ldx	[%sp+LOCALS64+$Hcub+8],$a1
	ldx	[%sp+LOCALS64+$Hcub+16],$a2
	ldx	[%sp+LOCALS64+$Hcub+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S1, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp

	ldx	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2	! !in2infty
___
for($i=0;$i<96;$i+=16) {		# conditional moves
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2		! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
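
# What the conditional-move loop above implements, as straight Perl
# (illustrative only, never called by the generator): if in1 is the
# point at infinity return in2, if in2 is, return in1, otherwise
# return the computed sum.
sub ref_select_point {
	my ($res, $in1, $in2, $in1nz, $in2nz) = @_;	# *nz: 0 or -1 masks
	my @out = @$res;
	@out = @$in2 unless $in1nz;	# movrz $t1,...
	@out = @$in1 unless $in2nz;	# movrz $t2,...
	return \@out;
}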
$code.=<<___;
.Ladd_done_vis3:
	ret
	restore
.type	ecp_nistz256_point_add_vis3,#function
.size	ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
___
}
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);
{
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;

# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty and !in2infty.

$code.=<<___;
.align	32
ecp_nistz256_point_add_affine_vis3:
	save	%sp,-STACK64_FRAME-32*15-32,%sp

	mov	$rp,$rp_real
	mov	-1,$minus1
	mov	-2,$poly3
	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$bp],$a0		! in2_x
	ld	[$bp+4],$t0
	ld	[$bp+8],$a1
	ld	[$bp+12],$t1
	ld	[$bp+16],$a2
	ld	[$bp+20],$t2
	ld	[$bp+24],$a3
	ld	[$bp+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	ld	[$bp+32],$acc0		! in2_y
	or	$a0,$t0,$a0
	ld	[$bp+32+4],$t0
	sllx	$t2,32,$t2
	ld	[$bp+32+8],$acc1
	or	$a1,$t1,$a1
	ld	[$bp+32+12],$t1
	sllx	$t3,32,$t3
	ld	[$bp+32+16],$acc2
	or	$a2,$t2,$a2
	ld	[$bp+32+20],$t2
	or	$a3,$t3,$a3
	ld	[$bp+32+24],$acc3
	sllx	$t0,32,$t0
	ld	[$bp+32+28],$t3
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in2_x]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in2_x+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in2_x+16]
	or	$acc0,$t0,$acc0
	stx	$a3,[%sp+LOCALS64+$in2_x+24]
	or	$acc1,$t1,$acc1
	stx	$acc0,[%sp+LOCALS64+$in2_y]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in2_y+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in2_y+16]
	stx	$acc3,[%sp+LOCALS64+$in2_y+24]

	or	$a1,$a0,$a0
	or	$a3,$a2,$a2
	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$a2,$a0,$a0
	or	$acc2,$acc0,$acc0
	or	$acc0,$a0,$a0
	movrnz	$a0,-1,$a0		! !in2infty
	stx	$a0,[%fp+STACK_BIAS-8]

	ld	[$ap],$a0		! in1_x
	ld	[$ap+4],$t0
	ld	[$ap+8],$a1
	ld	[$ap+12],$t1
	ld	[$ap+16],$a2
	ld	[$ap+20],$t2
	ld	[$ap+24],$a3
	ld	[$ap+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	ld	[$ap+32],$acc0		! in1_y
	or	$a0,$t0,$a0
	ld	[$ap+32+4],$t0
	sllx	$t2,32,$t2
	ld	[$ap+32+8],$acc1
	or	$a1,$t1,$a1
	ld	[$ap+32+12],$t1
	sllx	$t3,32,$t3
	ld	[$ap+32+16],$acc2
	or	$a2,$t2,$a2
	ld	[$ap+32+20],$t2
	or	$a3,$t3,$a3
	ld	[$ap+32+24],$acc3
	sllx	$t0,32,$t0
	ld	[$ap+32+28],$t3
	sllx	$t1,32,$t1
	stx	$a0,[%sp+LOCALS64+$in1_x]
	sllx	$t2,32,$t2
	stx	$a1,[%sp+LOCALS64+$in1_x+8]
	sllx	$t3,32,$t3
	stx	$a2,[%sp+LOCALS64+$in1_x+16]
	or	$acc0,$t0,$acc0
	stx	$a3,[%sp+LOCALS64+$in1_x+24]
	or	$acc1,$t1,$acc1
	stx	$acc0,[%sp+LOCALS64+$in1_y]
	or	$acc2,$t2,$acc2
	stx	$acc1,[%sp+LOCALS64+$in1_y+8]
	or	$acc3,$t3,$acc3
	stx	$acc2,[%sp+LOCALS64+$in1_y+16]
	stx	$acc3,[%sp+LOCALS64+$in1_y+24]

	ld	[$ap+64],$a0		! in1_z
	ld	[$ap+64+4],$t0
	ld	[$ap+64+8],$a1
	ld	[$ap+64+12],$t1
	ld	[$ap+64+16],$a2
	ld	[$ap+64+20],$t2
	ld	[$ap+64+24],$a3
	ld	[$ap+64+28],$t3
	sllx	$t0,32,$t0
	sllx	$t1,32,$t1
	or	$a0,$t0,$a0
	sllx	$t2,32,$t2
	or	$a1,$t1,$a1
	sllx	$t3,32,$t3
	stx	$a0,[%sp+LOCALS64+$in1_z]
	or	$a2,$t2,$a2
	stx	$a1,[%sp+LOCALS64+$in1_z+8]
	or	$a3,$t3,$a3
	stx	$a2,[%sp+LOCALS64+$in1_z+16]
	stx	$a3,[%sp+LOCALS64+$in1_z+24]

	or	$a1,$a0,$t0
	or	$a3,$a2,$t2
	or	$t2,$t0,$t0
	movrnz	$t0,-1,$t0		! !in1infty
	stx	$t0,[%fp+STACK_BIAS-16]

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS64+$Z1sqr,$rp

	ldx	[%sp+LOCALS64+$in2_x],$bi
	mov	$acc0,$a0
	mov	$acc1,$a1
	mov	$acc2,$a2
	mov	$acc3,$a3
	add	%sp,LOCALS64+$in2_x,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS64+$U2,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3

	add	%sp,LOCALS64+$in1_x,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS64+$H,$rp

	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$a0	! forward load
	ldx	[%sp+LOCALS64+$H+8],$a1
	ldx	[%sp+LOCALS64+$H+16],$a2
	ldx	[%sp+LOCALS64+$H+24],$a3

	add	%sp,LOCALS64+$in1_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS64+$R,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS64+$Hsqr,$rp

	ldx	[%sp+LOCALS64+$R],$a0
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS64+$Rsqr,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS64+$Hcub,$rp

	ldx	[%sp+LOCALS64+$Hsqr],$bi
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3
	add	%sp,LOCALS64+$Hsqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS64+$U2,$rp

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS64+$Hsqr,$rp

	add	%sp,LOCALS64+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$Hcub],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp

	ldx	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2	! !in2infty
1:	call	.+8
	add	%o7,.Lone_mont_vis3-1b,$bp
___
for($i=0;$i<64;$i+=16) {		# conditional moves
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2		! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
for(;$i<96;$i+=16) {
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[$bp+$i-64],$acc2			! "in2"
	ldx	[$bp+$i-64+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
$code.=<<___;
	ret
	restore
.type	ecp_nistz256_point_add_affine_vis3,#function
.size	ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
.align	64
.Lone_mont_vis3:
.long	0x00000000,0x00000001, 0xffffffff,0x00000000
.long	0xffffffff,0xffffffff, 0x00000000,0xfffffffe
.align	64
___
} }}}
# The purpose of this subroutine is to explicitly encode VIS3
# instructions, so that the module can be assembled without having to
# specify VIS extensions on the compiler command line, e.g. -xarch=v9
# vs. -xarch=v9a. The idea is to keep open the option of producing a
# "universal" binary and let the programmer detect at run-time whether
# the current CPU is VIS3-capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
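
# Worked example of the encoding formula above (illustrative): for
# "addxc %g1,%g2,%g3" we get rs1=1, rs2=2, rd=3 and opf=0x011, so the
# sub emits ".word 0x87b04222 ! addxc %g1,%g2,%g3", which any pre-VIS3
# assembler passes through untouched.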
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)
	/ge;

	print $_,"\n";
}

close STDOUT;