#! /usr/bin/env perl
# Copyright 2006-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# April 2006
#
# "Teaser" Montgomery multiplication module for PowerPC. It's possible
# to gain a bit more by modulo-scheduling the outer loop, then a
# dedicated squaring procedure should give a further 20%, and the code
# can be adapted for a 32-bit application running on a 64-bit CPU. As
# for the latter: it won't be able to achieve "native" 64-bit
# performance, because in a 32-bit application context every addc
# instruction would have to be expanded into addc, two right shifts by
# 32 and finally adde, etc. So far RSA *sign* performance improvement
# over pre-bn_mul_mont asm for a 64-bit application running on
# PPC970/G5 is:
#
#	512-bit		+65%
#	1024-bit	+35%
#	2048-bit	+18%
#	4096-bit	+4%
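#
# To illustrate the point above (a sketch only, not used anywhere
# below): with 32-bit limbs a single 64-bit add-with-carry has to
# become a dependent instruction pair,
#
#	addc	rLo,aLo,bLo	# low halves set XER[CA]
#	adde	rHi,aHi,bHi	# high halves consume XER[CA]
#
# and every 64-bit product likewise splits into a mullw/mulhwu pair,
# which is where the gap to "native" 64-bit throughput comes from.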

# September 2016
#
# Add a multiplication procedure operating on lengths divisible by 4
# and a squaring procedure operating on lengths divisible by 8. Length
# is expressed in number of limbs. RSA private key operations are
# ~35-50% faster (more for longer keys) on contemporary high-end POWER
# processors in 64-bit builds, [mysteriously enough] even more so in
# 32-bit builds. On low-end 32-bit processors the improvement turned
# out to be marginal...

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
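# For example (illustrative invocation only; the flavour names are
# whatever ppc-xlate.pl accepts on your platform, not mandated here):
#
#	perl ppc-mont.pl linux32 ppc-mont.s	# 32-bit mnemonics
#	perl ppc-mont.pl linux64 ppc-mont.s	# 64-bit mnemonics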

if ($flavour =~ /32/) {
	$BITS=	32;
	$BNSZ=	$BITS/8;
	$SIZE_T=4;
	$RZONE=	224;

	$LD=	"lwz";		# load
	$LDU=	"lwzu";		# load and update
	$LDX=	"lwzx";		# load indexed
	$ST=	"stw";		# store
	$STU=	"stwu";		# store and update
	$STX=	"stwx";		# store indexed
	$STUX=	"stwux";	# store indexed and update
	$UMULL=	"mullw";	# unsigned multiply low
	$UMULH=	"mulhwu";	# unsigned multiply high
	$UCMP=	"cmplw";	# unsigned compare
	$SHRI=	"srwi";		# unsigned shift right by immediate
	$SHLI=	"slwi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} elsif ($flavour =~ /64/) {
	$BITS=	64;
	$BNSZ=	$BITS/8;
	$SIZE_T=8;
	$RZONE=	288;

	# same as above, but 64-bit mnemonics...
	$LD=	"ld";		# load
	$LDU=	"ldu";		# load and update
	$LDX=	"ldx";		# load indexed
	$ST=	"std";		# store
	$STU=	"stdu";		# store and update
	$STX=	"stdx";		# store indexed
	$STUX=	"stdux";	# store indexed and update
	$UMULL=	"mulld";	# unsigned multiply low
	$UMULH=	"mulhdu";	# unsigned multiply high
	$UCMP=	"cmpld";	# unsigned compare
	$SHRI=	"srdi";		# unsigned shift right by immediate
	$SHLI=	"sldi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} else { die "nonsense $flavour"; }

$FRAME=8*$SIZE_T+$RZONE;
$LOCALS=8*$SIZE_T;
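# Note: $RZONE presumably accounts for the ABI-reserved "red zone"
# below the stack pointer (220 bytes rounded up on 32-bit AIX, 288 on
# the 64-bit ABIs), so the temporary area carved out below never
# overlaps storage the callee is allowed to scribble on.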

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";

$sp="r1";
$toc="r2";
$rp="r3";
$ap="r4";
$bp="r5";
$np="r6";
$n0="r7";
$num="r8";

{
my $ovf=$rp;
my $rp="r9";	# $rp is reassigned
my $aj="r10";
my $nj="r11";
my $tj="r12";
# non-volatile registers
my $i="r20";
my $j="r21";
my $tp="r22";
my $m0="r23";
my $m1="r24";
my $lo0="r25";
my $hi0="r26";
my $lo1="r27";
my $hi1="r28";
my $alo="r29";
my $ahi="r30";
my $nlo="r31";
#
my $nhi="r0";

$code=<<___;
.machine "any"
.text

.globl	.bn_mul_mont_int
.align	5
.bn_mul_mont_int:
	mr	$rp,r3		; $rp is reassigned
	li	r3,0
___
$code.=<<___ if ($BNSZ==4);
	cmpwi	$num,32		; longer key performance is not better
	bgelr
___
$code.=<<___;
	slwi	$num,$num,`log($BNSZ)/log(2)`
	li	$tj,-4096
	addi	$ovf,$num,$FRAME
	subf	$ovf,$ovf,$sp	; $sp-$ovf
	and	$ovf,$ovf,$tj	; minimize TLB usage
	subf	$ovf,$sp,$ovf	; $ovf-$sp
	mr	$tj,$sp
	srwi	$num,$num,`log($BNSZ)/log(2)`
	$STUX	$sp,$sp,$ovf

	$PUSH	r20,`-12*$SIZE_T`($tj)
	$PUSH	r21,`-11*$SIZE_T`($tj)
	$PUSH	r22,`-10*$SIZE_T`($tj)
	$PUSH	r23,`-9*$SIZE_T`($tj)
	$PUSH	r24,`-8*$SIZE_T`($tj)
	$PUSH	r25,`-7*$SIZE_T`($tj)
	$PUSH	r26,`-6*$SIZE_T`($tj)
	$PUSH	r27,`-5*$SIZE_T`($tj)
	$PUSH	r28,`-4*$SIZE_T`($tj)
	$PUSH	r29,`-3*$SIZE_T`($tj)
	$PUSH	r30,`-2*$SIZE_T`($tj)
	$PUSH	r31,`-1*$SIZE_T`($tj)

	$LD	$n0,0($n0)	; pull n0[0] value
	addi	$num,$num,-2	; adjust $num for counter register

	$LD	$m0,0($bp)	; m0=bp[0]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$LOCALS
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[0]
	$UMULH	$hi0,$aj,$m0

	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]

	$UMULL	$m1,$lo0,$n0	; "tp[0]"*n0

	$UMULL	$alo,$aj,$m0	; ap[1]*bp[0]
	$UMULH	$ahi,$aj,$m0

	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	addze	$hi1,$hi1

	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	$UMULH	$nhi,$nj,$m1

	mtctr	$num
	li	$j,`2*$BNSZ`
.align	4
L1st:
	$LDX	$aj,$ap,$j	; ap[j]
	addc	$lo0,$alo,$hi0
	$LDX	$nj,$np,$j	; np[j]
	addze	$hi0,$ahi
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[0]
	addc	$lo1,$nlo,$hi1
	$UMULH	$ahi,$aj,$m0
	addze	$hi1,$nhi
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	$UMULH	$nhi,$nj,$m1
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addi	$j,$j,$BNSZ	; j++
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz	L1st
;L1st
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	li	$ovf,0
	addc	$hi1,$hi1,$hi0
	addze	$ovf,$ovf	; upmost overflow bit
	$ST	$hi1,$BNSZ($tp)

	li	$i,$BNSZ
.align	4
Louter:
	$LDX	$m0,$bp,$i	; m0=bp[i]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$LOCALS
	$LD	$tj,$LOCALS($sp); tp[0]
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[i]
	$UMULH	$hi0,$aj,$m0
	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]
	addc	$lo0,$lo0,$tj	; ap[0]*bp[i]+tp[0]
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	addze	$hi0,$hi0
	$UMULL	$m1,$lo0,$n0	; tp[0]*n0
	$UMULH	$ahi,$aj,$m0
	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	addze	$hi1,$hi1
	$UMULH	$nhi,$nj,$m1

	mtctr	$num
	li	$j,`2*$BNSZ`
.align	4
Linner:
	$LDX	$aj,$ap,$j	; ap[j]
	addc	$lo0,$alo,$hi0
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addze	$hi0,$ahi
	$LDX	$nj,$np,$j	; np[j]
	addc	$lo1,$nlo,$hi1
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	addze	$hi1,$nhi
	$UMULH	$ahi,$aj,$m0
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	addze	$hi0,$hi0
	$UMULH	$nhi,$nj,$m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addi	$j,$j,$BNSZ	; j++
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz	Linner
;Linner
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	addze	$hi0,$hi0

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addic	$ovf,$ovf,-1	; move upmost overflow to XER[CA]
	li	$ovf,0
	adde	$hi1,$hi1,$hi0
	addze	$ovf,$ovf
	$ST	$hi1,$BNSZ($tp)
;
	slwi	$tj,$num,`log($BNSZ)/log(2)`
	$UCMP	$i,$tj
	addi	$i,$i,$BNSZ
	ble	Louter

	addi	$num,$num,2	; restore $num
	subfc	$j,$j,$j	; j=0 and "clear" XER[CA]
	addi	$tp,$sp,$LOCALS
	mtctr	$num

.align	4
Lsub:	$LDX	$tj,$tp,$j
	$LDX	$nj,$np,$j
	subfe	$aj,$nj,$tj	; tp[j]-np[j]
	$STX	$aj,$rp,$j
	addi	$j,$j,$BNSZ
	bdnz	Lsub

	li	$j,0
	mtctr	$num
	subfe	$ovf,$j,$ovf	; handle upmost overflow bit
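; What follows is a branch-free, constant-time select: $ovf is by now
; either 0 (the difference written to rp[] by Lsub is the reduced
; result) or all ones (tp[] was already fully reduced). Per word
; (illustration only):
;
;	rp[j] = (tp[j] & $ovf) | (rp[j] & ~$ovf)
;	tp[j] = 0		; zap temporary storage
;
; e.g. $ovf == 0 keeps rp[j] as computed by Lsub, while $ovf == ~0
; restores the original tp[j].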
.align	4
Lcopy:				; conditional copy
	$LDX	$tj,$tp,$j
	$LDX	$aj,$rp,$j
	and	$tj,$tj,$ovf
	andc	$aj,$aj,$ovf
	$STX	$j,$tp,$j	; zap at once
	or	$aj,$aj,$tj
	$STX	$aj,$rp,$j
	addi	$j,$j,$BNSZ
	bdnz	Lcopy

	$POP	$tj,0($sp)
	li	r3,1
	$POP	r20,`-12*$SIZE_T`($tj)
	$POP	r21,`-11*$SIZE_T`($tj)
	$POP	r22,`-10*$SIZE_T`($tj)
	$POP	r23,`-9*$SIZE_T`($tj)
	$POP	r24,`-8*$SIZE_T`($tj)
	$POP	r25,`-7*$SIZE_T`($tj)
	$POP	r26,`-6*$SIZE_T`($tj)
	$POP	r27,`-5*$SIZE_T`($tj)
	$POP	r28,`-4*$SIZE_T`($tj)
	$POP	r29,`-3*$SIZE_T`($tj)
	$POP	r30,`-2*$SIZE_T`($tj)
	$POP	r31,`-1*$SIZE_T`($tj)
	mr	$sp,$tj
	blr
	.long	0
	.byte	0,12,4,0,0x80,12,6,0
	.long	0
.size	.bn_mul_mont_int,.-.bn_mul_mont_int
___
}
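# For reference only, a C-level sketch of the word-by-word Montgomery
# multiplication the routine above implements (not emitted anywhere,
# and simplified: the real code interleaves the two inner products and
# handles i==0 without a tp[] read):
#
#	/* int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap,
#	 *                 const BN_ULONG *bp, const BN_ULONG *np,
#	 *                 const BN_ULONG *n0, int num);
#	 * where n0[0] = -np[0]^-1 mod 2^BITS */
#	for (i = 0; i < num; i++) {
#		(hi0,lo0) = ap[0]*bp[i] + tp[0];
#		m1 = lo0*n0;			/* kills low limb */
#		(hi1,lo1) = np[0]*m1 + lo0;	/* lo1 == 0 */
#		for (j = 1; j < num; j++) {
#			(hi0,lo0) = ap[j]*bp[i] + hi0 + tp[j];
#			(hi1,tp[j-1]) = np[j]*m1 + hi1 + lo0;
#		}
#		(ovf,tp[num-1]) = hi1 + hi0 + ovf;
#	}
#	/* then tp[] - np[] with conditional copy, as in Lsub/Lcopy */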
if (1) {
my ($a0,$a1,$a2,$a3,
    $t0,$t1,$t2,$t3,
    $m0,$m1,$m2,$m3,
    $acc0,$acc1,$acc2,$acc3,$acc4,
    $bi,$mi,$tp,$ap_end,$cnt) = map("r$_",(9..12,14..31));
my ($carry,$zero) = ($rp,"r0");

# sp----------->+-------------------------------+
#		| saved sp			|
#		+-------------------------------+
#		.				.
# +8*size_t	+-------------------------------+
#		| 4 "n0*t0"			|
#		.				.
#		.				.
# +12*size_t	+-------------------------------+
#		| size_t tmp[num]		|
#		.				.
#		.				.
#		.				.
#		+-------------------------------+
#		| topmost carry			|
#		.				.
# -18*size_t	+-------------------------------+
#		| 18 saved gpr, r14-r31		|
#		.				.
#		.				.
#		+-------------------------------+
$code.=<<___;
.globl	.bn_mul4x_mont_int
.align	5
.bn_mul4x_mont_int:
	andi.	r0,$num,7
	bne	.Lmul4x_do
	$UCMP	$ap,$bp
	bne	.Lmul4x_do
	b	.Lsqr8x_do
.Lmul4x_do:
	slwi	$num,$num,`log($SIZE_T)/log(2)`
	mr	$a0,$sp
	li	$a1,-32*$SIZE_T
	sub	$a1,$a1,$num
	$STUX	$sp,$sp,$a1		# alloca

	$PUSH	r14,-$SIZE_T*18($a0)
	$PUSH	r15,-$SIZE_T*17($a0)
	$PUSH	r16,-$SIZE_T*16($a0)
	$PUSH	r17,-$SIZE_T*15($a0)
	$PUSH	r18,-$SIZE_T*14($a0)
	$PUSH	r19,-$SIZE_T*13($a0)
	$PUSH	r20,-$SIZE_T*12($a0)
	$PUSH	r21,-$SIZE_T*11($a0)
	$PUSH	r22,-$SIZE_T*10($a0)
	$PUSH	r23,-$SIZE_T*9($a0)
	$PUSH	r24,-$SIZE_T*8($a0)
	$PUSH	r25,-$SIZE_T*7($a0)
	$PUSH	r26,-$SIZE_T*6($a0)
	$PUSH	r27,-$SIZE_T*5($a0)
	$PUSH	r28,-$SIZE_T*4($a0)
	$PUSH	r29,-$SIZE_T*3($a0)
	$PUSH	r30,-$SIZE_T*2($a0)
	$PUSH	r31,-$SIZE_T*1($a0)

	subi	$ap,$ap,$SIZE_T		# bias by -1
	subi	$np,$np,$SIZE_T		# bias by -1
	subi	$rp,$rp,$SIZE_T		# bias by -1
	$LD	$n0,0($n0)		# *n0

	add	$t0,$bp,$num
	add	$ap_end,$ap,$num
	subi	$t0,$t0,$SIZE_T*4	# &b[num-4]

	$LD	$bi,$SIZE_T*0($bp)	# b[0]
	li	$acc0,0
	$LD	$a0,$SIZE_T*1($ap)	# a[0..3]
	li	$acc1,0
	$LD	$a1,$SIZE_T*2($ap)
	li	$acc2,0
	$LD	$a2,$SIZE_T*3($ap)
	li	$acc3,0
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$m0,$SIZE_T*1($np)	# n[0..3]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)

	$PUSH	$rp,$SIZE_T*6($sp)	# offload rp and &b[num-4]
	$PUSH	$t0,$SIZE_T*7($sp)

	li	$carry,0
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	li	$cnt,0
	li	$zero,0
	b	.Loop_mul4x_1st_reduction

.align	5
.Loop_mul4x_1st_reduction:
	$UMULL	$t0,$a0,$bi		# lo(a[0..3]*b[0])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[0..3]*b[0])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULL	$mi,$acc0,$n0		# t[0]*n0
	adde	$acc3,$acc3,$t3
	$UMULH	$t2,$a2,$bi
	addze	$acc4,$zero
	$UMULH	$t3,$a3,$bi
	$LDX	$bi,$bp,$cnt		# next b[i] (or b[0])
	addc	$acc1,$acc1,$t0
	# (*)	mul	$t0,$m0,$mi	# lo(n[0..3]*t[0]*n0)
	$STU	$mi,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	# (*)	addc	$acc0,$acc0,$t0
	# (*)	As for the removal of the first multiplication and
	#	addition instructions: the outcome of the first addition
	#	is guaranteed to be zero, which leaves two computationally
	#	significant outcomes: it either carries or it doesn't. So
	#	the question is: when does it carry? Is there an
	#	alternative way to deduce it? If you follow the
	#	operations, you can observe that the condition for carry
	#	is quite simple: $acc0 being non-zero. So the carry can be
	#	calculated by adding -1 to $acc0, which is what the next
	#	instruction does.
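	#	Worked instance (illustration only): mi = $acc0*n0 makes
	#	lo(n[0]*mi) = -$acc0 mod 2^$BITS, so for $acc0 != 0 the
	#	sum $acc0 + lo(n[0]*mi) equals 2^$BITS, i.e. zero with
	#	carry, while for $acc0 == 0 it is zero without carry;
	#	"addic $acc0,$acc0,-1" generates exactly that carry.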
	addic	$acc0,$acc0,-1		# (*), discarded
	$UMULH	$t0,$m0,$mi		# hi(n[0..3]*t[0]*n0)
	adde	$acc0,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc1,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc2,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc3,$acc4,$carry
	addze	$carry,$zero
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_1st_reduction

	$UCMP	$ap_end,$ap
	beq	.Lmul4x4_post_condition

	$LD	$a0,$SIZE_T*1($ap)	# a[4..7]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$mi,$SIZE_T*8($sp)	# a[0]*n0
	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_1st_tail

.align	5
.Loop_mul4x_1st_tail:
	$UMULL	$t0,$a0,$bi		# lo(a[4..7]*b[i])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[4..7]*b[i])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a2,$bi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a3,$bi
	addze	$acc4,$zero
	$LDX	$bi,$bp,$cnt		# next b[i] (or b[0])
	addc	$acc1,$acc1,$t0
	$UMULL	$t0,$m0,$mi		# lo(n[4..7]*a[0]*n0)
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$m0,$mi		# hi(n[4..7]*a[0]*n0)
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc3,$acc3,$t3
	adde	$acc4,$acc4,$carry
	$UMULH	$t3,$m3,$mi
	addze	$carry,$zero
	addi	$mi,$sp,$SIZE_T*8
	$LDX	$mi,$mi,$cnt		# next t[0]*n0
	$STU	$acc0,$SIZE_T($tp)	# word of result
	addc	$acc0,$acc1,$t0
	adde	$acc1,$acc2,$t1
	adde	$acc2,$acc3,$t2
	adde	$acc3,$acc4,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_1st_tail

	sub	$t1,$ap_end,$num	# rewound $ap
	$UCMP	$ap_end,$ap		# done yet?
	beq	.Lmul4x_proceed

	$LD	$a0,$SIZE_T*1($ap)
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$m0,$SIZE_T*1($np)
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_1st_tail

.align	5
.Lmul4x_proceed:
	$LDU	$bi,$SIZE_T*4($bp)	# *++b
	addze	$carry,$carry		# topmost carry
	$LD	$a0,$SIZE_T*1($t1)
	$LD	$a1,$SIZE_T*2($t1)
	$LD	$a2,$SIZE_T*3($t1)
	$LD	$a3,$SIZE_T*4($t1)
	addi	$ap,$t1,$SIZE_T*4
	sub	$np,$np,$num		# rewind np

	$ST	$acc0,$SIZE_T*1($tp)	# result
	$ST	$acc1,$SIZE_T*2($tp)
	$ST	$acc2,$SIZE_T*3($tp)
	$ST	$acc3,$SIZE_T*4($tp)
	$ST	$carry,$SIZE_T*5($tp)	# save topmost carry
	$LD	$acc0,$SIZE_T*12($sp)	# t[0..3]
	$LD	$acc1,$SIZE_T*13($sp)
	$LD	$acc2,$SIZE_T*14($sp)
	$LD	$acc3,$SIZE_T*15($sp)

	$LD	$m0,$SIZE_T*1($np)	# n[0..3]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	li	$carry,0
	b	.Loop_mul4x_reduction

.align	5
.Loop_mul4x_reduction:
	$UMULL	$t0,$a0,$bi		# lo(a[0..3]*b[4])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[0..3]*b[4])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULL	$mi,$acc0,$n0		# t[0]*n0
	adde	$acc3,$acc3,$t3
	$UMULH	$t2,$a2,$bi
	addze	$acc4,$zero
	$UMULH	$t3,$a3,$bi
	$LDX	$bi,$bp,$cnt		# next b[i]
	addc	$acc1,$acc1,$t0
	# (*)	mul	$t0,$m0,$mi
	$STU	$mi,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi		# lo(n[0..3]*t[0]*n0)
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	# (*)	addc	$acc0,$acc0,$t0
	addic	$acc0,$acc0,-1		# (*), discarded
	$UMULH	$t0,$m0,$mi		# hi(n[0..3]*t[0]*n0)
	adde	$acc0,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc1,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc2,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc3,$acc4,$carry
	addze	$carry,$zero
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_reduction

	$LD	$t0,$SIZE_T*5($tp)	# t[4..7]
	addze	$carry,$carry
	$LD	$t1,$SIZE_T*6($tp)
	$LD	$t2,$SIZE_T*7($tp)
	$LD	$t3,$SIZE_T*8($tp)
	$LD	$a0,$SIZE_T*1($ap)	# a[4..7]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry

	$LD	$mi,$SIZE_T*8($sp)	# t[0]*n0
	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_tail

.align	5
.Loop_mul4x_tail:
	$UMULL	$t0,$a0,$bi		# lo(a[4..7]*b[4])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[4..7]*b[4])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a2,$bi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a3,$bi
	addze	$acc4,$zero
	$LDX	$bi,$bp,$cnt		# next b[i]
	addc	$acc1,$acc1,$t0
	$UMULL	$t0,$m0,$mi		# lo(n[4..7]*t[0]*n0)
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$m0,$mi		# hi(n[4..7]*t[0]*n0)
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc4,$acc4,$carry
	addi	$mi,$sp,$SIZE_T*8
	$LDX	$mi,$mi,$cnt		# next a[0]*n0
	addze	$carry,$zero
	$STU	$acc0,$SIZE_T($tp)	# word of result
	addc	$acc0,$acc1,$t0
	adde	$acc1,$acc2,$t1
	adde	$acc2,$acc3,$t2
	adde	$acc3,$acc4,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_tail

	$LD	$t0,$SIZE_T*5($tp)	# next t[i] or topmost carry
	sub	$t1,$np,$num		# rewound np?
	addze	$carry,$carry
	$UCMP	$ap_end,$ap		# done yet?
	beq	.Loop_mul4x_break

	$LD	$t1,$SIZE_T*6($tp)
	$LD	$t2,$SIZE_T*7($tp)
	$LD	$t3,$SIZE_T*8($tp)
	$LD	$a0,$SIZE_T*1($ap)
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry

	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_tail

.align	5
.Loop_mul4x_break:
	$POP	$t2,$SIZE_T*6($sp)	# pull rp and &b[num-4]
	$POP	$t3,$SIZE_T*7($sp)
	addc	$a0,$acc0,$t0		# accumulate topmost carry
	$LD	$acc0,$SIZE_T*12($sp)	# t[0..3]
	addze	$a1,$acc1
	$LD	$acc1,$SIZE_T*13($sp)
	addze	$a2,$acc2
	$LD	$acc2,$SIZE_T*14($sp)
	addze	$a3,$acc3
	$LD	$acc3,$SIZE_T*15($sp)
	addze	$carry,$carry		# topmost carry
	$ST	$a0,$SIZE_T*1($tp)	# result
	sub	$ap,$ap_end,$num	# rewind ap
	$ST	$a1,$SIZE_T*2($tp)
	$ST	$a2,$SIZE_T*3($tp)
	$ST	$a3,$SIZE_T*4($tp)
	$ST	$carry,$SIZE_T*5($tp)	# store topmost carry

	$LD	$m0,$SIZE_T*1($t1)	# n[0..3]
	$LD	$m1,$SIZE_T*2($t1)
	$LD	$m2,$SIZE_T*3($t1)
	$LD	$m3,$SIZE_T*4($t1)
	addi	$np,$t1,$SIZE_T*4
	$UCMP	$bp,$t3			# done yet?
	beq	.Lmul4x_post

	$LDU	$bi,$SIZE_T*4($bp)
	$LD	$a0,$SIZE_T*1($ap)	# a[0..3]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	li	$carry,0
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	b	.Loop_mul4x_reduction

.align	5
.Lmul4x_post:
	# Final step. We check whether the result is larger than the
	# modulus and, if it is, subtract the modulus. But comparison
	# implies subtraction, so we subtract the modulus, check whether
	# it borrowed, and conditionally copy the original value.
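	# Per word this reduces to the usual branch-free select
	# (illustration only):
	#
	#	mask  = topmost_carry - borrow;	# "subfe $carry,$zero,$carry"
	#					# yields 0 or all-ones
	#	rp[j] = (tp[j] & mask) | (rp[j] & ~mask);
	#	tp[j] = 0;			# wipe stack clean
	#
	# mask == 0 keeps the difference already stored in rp[], while
	# mask == ~0 restores the not-yet-reduced value from tp[].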
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	mr	$bp,$t2			# &rp[-1]
	subi	$cnt,$cnt,1
	mr	$ap_end,$t2		# &rp[-1] copy
	subfc	$t0,$m0,$acc0
	addi	$tp,$sp,$SIZE_T*15
	subfe	$t1,$m1,$acc1
	mtctr	$cnt

.Lmul4x_sub:
	$LD	$m0,$SIZE_T*1($np)
	$LD	$acc0,$SIZE_T*1($tp)
	subfe	$t2,$m2,$acc2
	$LD	$m1,$SIZE_T*2($np)
	$LD	$acc1,$SIZE_T*2($tp)
	subfe	$t3,$m3,$acc3
	$LD	$m2,$SIZE_T*3($np)
	$LD	$acc2,$SIZE_T*3($tp)
	$LDU	$m3,$SIZE_T*4($np)
	$LDU	$acc3,$SIZE_T*4($tp)
	$ST	$t0,$SIZE_T*1($bp)
	$ST	$t1,$SIZE_T*2($bp)
	subfe	$t0,$m0,$acc0
	$ST	$t2,$SIZE_T*3($bp)
	$STU	$t3,$SIZE_T*4($bp)
	subfe	$t1,$m1,$acc1
	bdnz	.Lmul4x_sub

	$LD	$a0,$SIZE_T*1($ap_end)
	$ST	$t0,$SIZE_T*1($bp)
	$LD	$t0,$SIZE_T*12($sp)
	subfe	$t2,$m2,$acc2
	$LD	$a1,$SIZE_T*2($ap_end)
	$ST	$t1,$SIZE_T*2($bp)
	$LD	$t1,$SIZE_T*13($sp)
	subfe	$t3,$m3,$acc3
	subfe	$carry,$zero,$carry	# did it borrow?
	addi	$tp,$sp,$SIZE_T*12
	$LD	$a2,$SIZE_T*3($ap_end)
	$ST	$t2,$SIZE_T*3($bp)
	$LD	$t2,$SIZE_T*14($sp)
	$LD	$a3,$SIZE_T*4($ap_end)
	$ST	$t3,$SIZE_T*4($bp)
	$LD	$t3,$SIZE_T*15($sp)
	mtctr	$cnt

.Lmul4x_cond_copy:
	and	$t0,$t0,$carry
	andc	$a0,$a0,$carry
	$ST	$zero,$SIZE_T*0($tp)	# wipe stack clean
	and	$t1,$t1,$carry
	andc	$a1,$a1,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$t2,$t2,$carry
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	and	$t3,$t3,$carry
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	or	$acc0,$t0,$a0
	$LD	$a0,$SIZE_T*5($ap_end)
	$LD	$t0,$SIZE_T*4($tp)
	or	$acc1,$t1,$a1
	$LD	$a1,$SIZE_T*6($ap_end)
	$LD	$t1,$SIZE_T*5($tp)
	or	$acc2,$t2,$a2
	$LD	$a2,$SIZE_T*7($ap_end)
	$LD	$t2,$SIZE_T*6($tp)
	or	$acc3,$t3,$a3
	$LD	$a3,$SIZE_T*8($ap_end)
	$LD	$t3,$SIZE_T*7($tp)
	addi	$tp,$tp,$SIZE_T*4
	$ST	$acc0,$SIZE_T*1($ap_end)
	$ST	$acc1,$SIZE_T*2($ap_end)
	$ST	$acc2,$SIZE_T*3($ap_end)
	$STU	$acc3,$SIZE_T*4($ap_end)
	bdnz	.Lmul4x_cond_copy

	$POP	$bp,0($sp)		# pull saved sp
	and	$t0,$t0,$carry
	andc	$a0,$a0,$carry
	$ST	$zero,$SIZE_T*0($tp)
	and	$t1,$t1,$carry
	andc	$a1,$a1,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$t2,$t2,$carry
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	and	$t3,$t3,$carry
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	or	$acc0,$t0,$a0
	or	$acc1,$t1,$a1
	$ST	$zero,$SIZE_T*4($tp)
	or	$acc2,$t2,$a2
	or	$acc3,$t3,$a3
	$ST	$acc0,$SIZE_T*1($ap_end)
	$ST	$acc1,$SIZE_T*2($ap_end)
	$ST	$acc2,$SIZE_T*3($ap_end)
	$ST	$acc3,$SIZE_T*4($ap_end)
	b	.Lmul4x_done

.align	4
.Lmul4x4_post_condition:
	$POP	$ap,$SIZE_T*6($sp)	# pull &rp[-1]
	$POP	$bp,0($sp)		# pull saved sp
	addze	$carry,$carry		# modulo-scheduled

	# $acc0-3,$carry hold result, $m0-3 hold modulus
	subfc	$a0,$m0,$acc0
	subfe	$a1,$m1,$acc1
	subfe	$a2,$m2,$acc2
	subfe	$a3,$m3,$acc3
	subfe	$carry,$zero,$carry	# did it borrow?

	and	$m0,$m0,$carry
	and	$m1,$m1,$carry
	addc	$a0,$a0,$m0
	and	$m2,$m2,$carry
	adde	$a1,$a1,$m1
	and	$m3,$m3,$carry
	adde	$a2,$a2,$m2
	adde	$a3,$a3,$m3

	$ST	$a0,$SIZE_T*1($ap)	# write result
	$ST	$a1,$SIZE_T*2($ap)
	$ST	$a2,$SIZE_T*3($ap)
	$ST	$a3,$SIZE_T*4($ap)

.Lmul4x_done:
	$ST	$zero,$SIZE_T*8($sp)	# wipe stack clean
	$ST	$zero,$SIZE_T*9($sp)
	$ST	$zero,$SIZE_T*10($sp)
	$ST	$zero,$SIZE_T*11($sp)
	li	r3,1			# signal "done"
	$POP	r14,-$SIZE_T*18($bp)
	$POP	r15,-$SIZE_T*17($bp)
	$POP	r16,-$SIZE_T*16($bp)
	$POP	r17,-$SIZE_T*15($bp)
	$POP	r18,-$SIZE_T*14($bp)
	$POP	r19,-$SIZE_T*13($bp)
	$POP	r20,-$SIZE_T*12($bp)
	$POP	r21,-$SIZE_T*11($bp)
	$POP	r22,-$SIZE_T*10($bp)
	$POP	r23,-$SIZE_T*9($bp)
	$POP	r24,-$SIZE_T*8($bp)
	$POP	r25,-$SIZE_T*7($bp)
	$POP	r26,-$SIZE_T*6($bp)
	$POP	r27,-$SIZE_T*5($bp)
	$POP	r28,-$SIZE_T*4($bp)
	$POP	r29,-$SIZE_T*3($bp)
	$POP	r30,-$SIZE_T*2($bp)
	$POP	r31,-$SIZE_T*1($bp)
	mr	$sp,$bp
	blr
	.long	0
	.byte	0,12,4,0x20,0x80,18,6,0
	.long	0
.size	.bn_mul4x_mont_int,.-.bn_mul4x_mont_int
___
}
if (1) {
########################################################################
# Following is PPC adaptation of sqrx8x_mont from x86_64-mont5 module.

my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("r$_",(9..12,14..17));
my ($t0,$t1,$t2,$t3)=map("r$_",(18..21));
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("r$_",(22..29));
my ($cnt,$carry,$zero)=("r30","r31","r0");
my ($tp,$ap_end,$na0)=($bp,$np,$carry);

# sp----------->+-------------------------------+
#		| saved sp			|
#		+-------------------------------+
#		.				.
# +12*size_t	+-------------------------------+
#		| size_t tmp[2*num]		|
#		.				.
#		.				.
#		.				.
#		+-------------------------------+
#		.				.
# -18*size_t	+-------------------------------+
#		| 18 saved gpr, r14-r31		|
#		.				.
#		.				.
#		+-------------------------------+
$code.=<<___;
.align	5
__bn_sqr8x_mont:
.Lsqr8x_do:
	mr	$a0,$sp
	slwi	$a1,$num,`log($SIZE_T)/log(2)+1`
	li	$a2,-32*$SIZE_T
	sub	$a1,$a2,$a1
	slwi	$num,$num,`log($SIZE_T)/log(2)`
	$STUX	$sp,$sp,$a1		# alloca

	$PUSH	r14,-$SIZE_T*18($a0)
	$PUSH	r15,-$SIZE_T*17($a0)
	$PUSH	r16,-$SIZE_T*16($a0)
	$PUSH	r17,-$SIZE_T*15($a0)
	$PUSH	r18,-$SIZE_T*14($a0)
	$PUSH	r19,-$SIZE_T*13($a0)
	$PUSH	r20,-$SIZE_T*12($a0)
	$PUSH	r21,-$SIZE_T*11($a0)
	$PUSH	r22,-$SIZE_T*10($a0)
	$PUSH	r23,-$SIZE_T*9($a0)
	$PUSH	r24,-$SIZE_T*8($a0)
	$PUSH	r25,-$SIZE_T*7($a0)
	$PUSH	r26,-$SIZE_T*6($a0)
	$PUSH	r27,-$SIZE_T*5($a0)
	$PUSH	r28,-$SIZE_T*4($a0)
	$PUSH	r29,-$SIZE_T*3($a0)
	$PUSH	r30,-$SIZE_T*2($a0)
	$PUSH	r31,-$SIZE_T*1($a0)

	subi	$ap,$ap,$SIZE_T		# bias by -1
	subi	$t0,$np,$SIZE_T		# bias by -1
	subi	$rp,$rp,$SIZE_T		# bias by -1
	$LD	$n0,0($n0)		# *n0
	li	$zero,0

	add	$ap_end,$ap,$num
	$LD	$a0,$SIZE_T*1($ap)
	#li	$acc0,0
	$LD	$a1,$SIZE_T*2($ap)
	li	$acc1,0
	$LD	$a2,$SIZE_T*3($ap)
	li	$acc2,0
	$LD	$a3,$SIZE_T*4($ap)
	li	$acc3,0
	$LD	$a4,$SIZE_T*5($ap)
	li	$acc4,0
	$LD	$a5,$SIZE_T*6($ap)
	li	$acc5,0
	$LD	$a6,$SIZE_T*7($ap)
	li	$acc6,0
	$LDU	$a7,$SIZE_T*8($ap)
	li	$acc7,0

	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	subic.	$cnt,$num,$SIZE_T*8
	b	.Lsqr8x_zero_start

.align	5
.Lsqr8x_zero:
	subic.	$cnt,$cnt,$SIZE_T*8
	$ST	$zero,$SIZE_T*1($tp)
	$ST	$zero,$SIZE_T*2($tp)
	$ST	$zero,$SIZE_T*3($tp)
	$ST	$zero,$SIZE_T*4($tp)
	$ST	$zero,$SIZE_T*5($tp)
	$ST	$zero,$SIZE_T*6($tp)
	$ST	$zero,$SIZE_T*7($tp)
	$ST	$zero,$SIZE_T*8($tp)
.Lsqr8x_zero_start:
	$ST	$zero,$SIZE_T*9($tp)
	$ST	$zero,$SIZE_T*10($tp)
	$ST	$zero,$SIZE_T*11($tp)
	$ST	$zero,$SIZE_T*12($tp)
	$ST	$zero,$SIZE_T*13($tp)
	$ST	$zero,$SIZE_T*14($tp)
	$ST	$zero,$SIZE_T*15($tp)
	$STU	$zero,$SIZE_T*16($tp)
	bne	.Lsqr8x_zero

	$PUSH	$rp,$SIZE_T*6($sp)	# offload &rp[-1]
	$PUSH	$t0,$SIZE_T*7($sp)	# offload &np[-1]
	$PUSH	$n0,$SIZE_T*8($sp)	# offload n0
	$PUSH	$tp,$SIZE_T*9($sp)	# &tp[2*num-1]
	$PUSH	$zero,$SIZE_T*10($sp)	# initial top-most carry
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]

	# Multiply everything but a[i]*a[i]
.align	5
.Lsqr8x_outer_loop:
	# a[1]a[0]	(i)
	# a[2]a[0]
	# a[3]a[0]
	# a[4]a[0]
	# a[5]a[0]
	# a[6]a[0]
	# a[7]a[0]
	# a[2]a[1]	(ii)
	# a[3]a[1]
	# a[4]a[1]
	# a[5]a[1]
	# a[6]a[1]
	# a[7]a[1]
	# a[3]a[2]	(iii)
	# a[4]a[2]
	# a[5]a[2]
	# a[6]a[2]
	# a[7]a[2]
	# a[4]a[3]	(iv)
	# a[5]a[3]
	# a[6]a[3]
	# a[7]a[3]
	# a[5]a[4]	(v)
	# a[6]a[4]
	# a[7]a[4]
	# a[6]a[5]	(vi)
	# a[7]a[5]
	# a[7]a[6]	(vii)

	$UMULL	$t0,$a1,$a0		# lo(a[1..7]*a[0])		(i)
	$UMULL	$t1,$a2,$a0
	$UMULL	$t2,$a3,$a0
	$UMULL	$t3,$a4,$a0
	addc	$acc1,$acc1,$t0		# t[1]+lo(a[1]*a[0])
	$UMULL	$t0,$a5,$a0
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$a6,$a0
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$a7,$a0
	adde	$acc4,$acc4,$t3
	$UMULH	$t3,$a1,$a0		# hi(a[1..7]*a[0])
	adde	$acc5,$acc5,$t0
	$UMULH	$t0,$a2,$a0
	adde	$acc6,$acc6,$t1
	$UMULH	$t1,$a3,$a0
	adde	$acc7,$acc7,$t2
	$UMULH	$t2,$a4,$a0
	$ST	$acc0,$SIZE_T*1($tp)	# t[0]
	addze	$acc0,$zero		# t[8]
	$ST	$acc1,$SIZE_T*2($tp)	# t[1]
	addc	$acc2,$acc2,$t3		# t[2]+hi(a[1]*a[0])
	$UMULH	$t3,$a5,$a0
	adde	$acc3,$acc3,$t0
	$UMULH	$t0,$a6,$a0
	adde	$acc4,$acc4,$t1
	$UMULH	$t1,$a7,$a0
	adde	$acc5,$acc5,$t2
	$UMULL	$t2,$a2,$a1		# lo(a[2..7]*a[1])		(ii)
	adde	$acc6,$acc6,$t3
	$UMULL	$t3,$a3,$a1
	adde	$acc7,$acc7,$t0
	$UMULL	$t0,$a4,$a1
	adde	$acc0,$acc0,$t1

	$UMULL	$t1,$a5,$a1
	addc	$acc3,$acc3,$t2
	$UMULL	$t2,$a6,$a1
	adde	$acc4,$acc4,$t3
	$UMULL	$t3,$a7,$a1
	adde	$acc5,$acc5,$t0
	$UMULH	$t0,$a2,$a1		# hi(a[2..7]*a[1])
	adde	$acc6,$acc6,$t1
	$UMULH	$t1,$a3,$a1
	adde	$acc7,$acc7,$t2
	$UMULH	$t2,$a4,$a1
	adde	$acc0,$acc0,$t3
	$UMULH	$t3,$a5,$a1
	$ST	$acc2,$SIZE_T*3($tp)	# t[2]
	addze	$acc1,$zero		# t[9]
	$ST	$acc3,$SIZE_T*4($tp)	# t[3]
	addc	$acc4,$acc4,$t0
	$UMULH	$t0,$a6,$a1
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a7,$a1
	adde	$acc6,$acc6,$t2
	$UMULL	$t2,$a3,$a2		# lo(a[3..7]*a[2])		(iii)
	adde	$acc7,$acc7,$t3
	$UMULL	$t3,$a4,$a2
	adde	$acc0,$acc0,$t0
	$UMULL	$t0,$a5,$a2
	adde	$acc1,$acc1,$t1

	$UMULL	$t1,$a6,$a2
	addc	$acc5,$acc5,$t2
	$UMULL	$t2,$a7,$a2
	adde	$acc6,$acc6,$t3
	$UMULH	$t3,$a3,$a2		# hi(a[3..7]*a[2])
	adde	$acc7,$acc7,$t0
	$UMULH	$t0,$a4,$a2
	adde	$acc0,$acc0,$t1
	$UMULH	$t1,$a5,$a2
	adde	$acc1,$acc1,$t2
	$UMULH	$t2,$a6,$a2
	$ST	$acc4,$SIZE_T*5($tp)	# t[4]
	addze	$acc2,$zero		# t[10]
	$ST	$acc5,$SIZE_T*6($tp)	# t[5]
	addc	$acc6,$acc6,$t3
	$UMULH	$t3,$a7,$a2
	adde	$acc7,$acc7,$t0
	$UMULL	$t0,$a4,$a3		# lo(a[4..7]*a[3])		(iv)
	adde	$acc0,$acc0,$t1
	$UMULL	$t1,$a5,$a3
	adde	$acc1,$acc1,$t2
	$UMULL	$t2,$a6,$a3
	adde	$acc2,$acc2,$t3

	$UMULL	$t3,$a7,$a3
	addc	$acc7,$acc7,$t0
	$UMULH	$t0,$a4,$a3		# hi(a[4..7]*a[3])
	adde	$acc0,$acc0,$t1
	$UMULH	$t1,$a5,$a3
	adde	$acc1,$acc1,$t2
	$UMULH	$t2,$a6,$a3
	adde	$acc2,$acc2,$t3
	$UMULH	$t3,$a7,$a3
	$ST	$acc6,$SIZE_T*7($tp)	# t[6]
	addze	$acc3,$zero		# t[11]
	$STU	$acc7,$SIZE_T*8($tp)	# t[7]
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a5,$a4		# lo(a[5..7]*a[4])		(v)
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a6,$a4
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a7,$a4
	adde	$acc3,$acc3,$t3

	$UMULH	$t3,$a5,$a4		# hi(a[5..7]*a[4])
	addc	$acc1,$acc1,$t0
	$UMULH	$t0,$a6,$a4
	adde	$acc2,$acc2,$t1
	$UMULH	$t1,$a7,$a4
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$a6,$a5		# lo(a[6..7]*a[5])		(vi)
	addze	$acc4,$zero		# t[12]
	addc	$acc2,$acc2,$t3
	$UMULL	$t3,$a7,$a5
	adde	$acc3,$acc3,$t0
	$UMULH	$t0,$a6,$a5		# hi(a[6..7]*a[5])
	adde	$acc4,$acc4,$t1

	$UMULH	$t1,$a7,$a5
	addc	$acc3,$acc3,$t2
	$UMULL	$t2,$a7,$a6		# lo(a[7]*a[6])			(vii)
	adde	$acc4,$acc4,$t3
	$UMULH	$t3,$a7,$a6		# hi(a[7]*a[6])
	addze	$acc5,$zero		# t[13]
	addc	$acc4,$acc4,$t0
	$UCMP	$ap_end,$ap		# done yet?
	adde	$acc5,$acc5,$t1

	addc	$acc5,$acc5,$t2
	sub	$t0,$ap_end,$num	# rewound ap
	addze	$acc6,$zero		# t[14]
	add	$acc6,$acc6,$t3

	beq	.Lsqr8x_outer_break

	mr	$n0,$a0
	$LD	$a0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($tp)
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($ap)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($ap)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($ap)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($ap)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($ap)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($ap)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($ap)
	subi	$rp,$ap,$SIZE_T*7
	addze	$acc7,$a7
	$LDU	$a7,$SIZE_T*8($ap)
	#addze	$carry,$zero		# moved below
	li	$cnt,0
	b	.Lsqr8x_mul

	# a[8]a[0]
	# a[9]a[0]
	# a[a]a[0]
	# a[b]a[0]
	# a[c]a[0]
	# a[d]a[0]
	# a[e]a[0]
	# a[f]a[0]
	# a[8]a[1]
	# a[f]a[1]........................
	# a[8]a[2]
	# a[f]a[2]........................
	# a[8]a[3]
	# a[f]a[3]........................
	# a[8]a[4]
	# a[f]a[4]........................
	# a[8]a[5]
	# a[f]a[5]........................
	# a[8]a[6]
	# a[f]a[6]........................
	# a[8]a[7]
	# a[f]a[7]........................
.align	5
.Lsqr8x_mul:
	$UMULL	$t0,$a0,$n0
	addze	$carry,$zero		# carry bit, modulo-scheduled
	$UMULL	$t1,$a1,$n0
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$n0
	andi.	$cnt,$cnt,$SIZE_T*8-1
	$UMULL	$t3,$a3,$n0
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a4,$n0
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a5,$n0
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a6,$n0
	adde	$acc3,$acc3,$t3
	$UMULL	$t3,$a7,$n0
	adde	$acc4,$acc4,$t0
	$UMULH	$t0,$a0,$n0
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a1,$n0
	adde	$acc6,$acc6,$t2
	$UMULH	$t2,$a2,$n0
	adde	$acc7,$acc7,$t3
	$UMULH	$t3,$a3,$n0
	addze	$carry,$carry
	$STU	$acc0,$SIZE_T($tp)
	addc	$acc0,$acc1,$t0
	$UMULH	$t0,$a4,$n0
	adde	$acc1,$acc2,$t1
	$UMULH	$t1,$a5,$n0
	adde	$acc2,$acc3,$t2
	$UMULH	$t2,$a6,$n0
	adde	$acc3,$acc4,$t3
	$UMULH	$t3,$a7,$n0
	$LDX	$n0,$rp,$cnt
	adde	$acc4,$acc5,$t0
	adde	$acc5,$acc6,$t1
	adde	$acc6,$acc7,$t2
	adde	$acc7,$carry,$t3
	#addze	$carry,$zero		# moved above
	bne	.Lsqr8x_mul
					# note that carry flag is guaranteed
					# to be zero at this point
	$UCMP	$ap,$ap_end		# done yet?
	beq	.Lsqr8x_break

	$LD	$a0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($tp)
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($ap)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($ap)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($ap)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($ap)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($ap)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($ap)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($ap)
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*8($ap)
	#addze	$carry,$zero		# moved above
	b	.Lsqr8x_mul

.align	5
.Lsqr8x_break:
	$LD	$a0,$SIZE_T*8($rp)
	addi	$ap,$rp,$SIZE_T*15
	$LD	$a1,$SIZE_T*9($rp)
	sub.	$t0,$ap_end,$ap		# is it last iteration?
	$LD	$a2,$SIZE_T*10($rp)
	sub	$t1,$tp,$t0
	$LD	$a3,$SIZE_T*11($rp)
	$LD	$a4,$SIZE_T*12($rp)
	$LD	$a5,$SIZE_T*13($rp)
	$LD	$a6,$SIZE_T*14($rp)
	$LD	$a7,$SIZE_T*15($rp)
	beq	.Lsqr8x_outer_loop

	$ST	$acc0,$SIZE_T*1($tp)
	$LD	$acc0,$SIZE_T*1($t1)
	$ST	$acc1,$SIZE_T*2($tp)
	$LD	$acc1,$SIZE_T*2($t1)
	$ST	$acc2,$SIZE_T*3($tp)
	$LD	$acc2,$SIZE_T*3($t1)
	$ST	$acc3,$SIZE_T*4($tp)
	$LD	$acc3,$SIZE_T*4($t1)
	$ST	$acc4,$SIZE_T*5($tp)
	$LD	$acc4,$SIZE_T*5($t1)
	$ST	$acc5,$SIZE_T*6($tp)
	$LD	$acc5,$SIZE_T*6($t1)
	$ST	$acc6,$SIZE_T*7($tp)
	$LD	$acc6,$SIZE_T*7($t1)
	$ST	$acc7,$SIZE_T*8($tp)
	$LD	$acc7,$SIZE_T*8($t1)
	mr	$tp,$t1
	b	.Lsqr8x_outer_loop

.align	5
.Lsqr8x_outer_break:
	####################################################################
	# Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
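	#
	# This relies on the identity (illustration, num==8 case):
	#
	#   (sum_i a[i]*2^(BITS*i))^2 = sum_i a[i]^2 * 2^(2*BITS*i)
	#			+ 2 * sum_{i<j} a[i]*a[j] * 2^(BITS*(i+j))
	#
	# tp[] currently holds the cross products sum_{i<j}; the loop
	# below doubles it one limb at a time, via the pattern
	#
	#	acc = (t[k] << 1) | (t[k-1] >> (BITS-1))
	#
	# while interleaving the additions of the squares a[i]*a[i].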
	$LD	$a1,$SIZE_T*1($t0)	# recall that $t0 is &a[-1]
	$LD	$a3,$SIZE_T*2($t0)
	$LD	$a5,$SIZE_T*3($t0)
	$LD	$a7,$SIZE_T*4($t0)
	addi	$ap,$t0,$SIZE_T*4
					# "tp[x]" comments are for num==8 case
	$LD	$t1,$SIZE_T*13($sp)	# =tp[1], t[0] is not interesting
	$LD	$t2,$SIZE_T*14($sp)
	$LD	$t3,$SIZE_T*15($sp)
	$LD	$t0,$SIZE_T*16($sp)

	$ST	$acc0,$SIZE_T*1($tp)	# tp[8]=
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	$ST	$acc1,$SIZE_T*2($tp)
	subi	$cnt,$cnt,1
	$ST	$acc2,$SIZE_T*3($tp)
	$ST	$acc3,$SIZE_T*4($tp)
	$ST	$acc4,$SIZE_T*5($tp)
	$ST	$acc5,$SIZE_T*6($tp)
	$ST	$acc6,$SIZE_T*7($tp)
	#$ST	$acc7,$SIZE_T*8($tp)	# tp[15] is not interesting
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	$UMULL	$acc0,$a1,$a1
	$UMULH	$a1,$a1,$a1
	add	$acc1,$t1,$t1		# <<1
	$SHRI	$t1,$t1,$BITS-1
	$UMULL	$a2,$a3,$a3
	$UMULH	$a3,$a3,$a3
	addc	$acc1,$acc1,$a1
	add	$acc2,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	add	$acc3,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc2,$acc2,$t1
	mtctr	$cnt
.Lsqr4x_shift_n_add:
	$UMULL	$a4,$a5,$a5
	$UMULH	$a5,$a5,$a5
	$LD	$t1,$SIZE_T*6($tp)	# =tp[5]
	$LD	$a1,$SIZE_T*1($ap)
	adde	$acc2,$acc2,$a2
	add	$acc4,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc3,$acc3,$t2
	$LD	$t2,$SIZE_T*7($tp)	# =tp[6]
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*2($ap)
	add	$acc5,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc4,$acc4,$t3
	$LD	$t3,$SIZE_T*8($tp)	# =tp[7]
	$UMULL	$a6,$a7,$a7
	$UMULH	$a7,$a7,$a7
	adde	$acc4,$acc4,$a4
	add	$acc6,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc5,$acc5,$t0
	$LD	$t0,$SIZE_T*9($tp)	# =tp[8]
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*3($ap)
	add	$acc7,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc6,$acc6,$t1
	$LD	$t1,$SIZE_T*10($tp)	# =tp[9]
	$UMULL	$a0,$a1,$a1
	$UMULH	$a1,$a1,$a1
	adde	$acc6,$acc6,$a6
	$ST	$acc0,$SIZE_T*1($tp)	# tp[0]=
	add	$acc0,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc7,$acc7,$t2
	$LD	$t2,$SIZE_T*11($tp)	# =tp[10]
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*4($ap)
	$ST	$acc1,$SIZE_T*2($tp)	# tp[1]=
	add	$acc1,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc0,$acc0,$t3
	$LD	$t3,$SIZE_T*12($tp)	# =tp[11]
	$UMULL	$a2,$a3,$a3
	$UMULH	$a3,$a3,$a3
	adde	$acc0,$acc0,$a0
	$ST	$acc2,$SIZE_T*3($tp)	# tp[2]=
	add	$acc2,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc1,$acc1,$t0
	$LD	$t0,$SIZE_T*13($tp)	# =tp[12]
	adde	$acc1,$acc1,$a1
	$ST	$acc3,$SIZE_T*4($tp)	# tp[3]=
	$ST	$acc4,$SIZE_T*5($tp)	# tp[4]=
	$ST	$acc5,$SIZE_T*6($tp)	# tp[5]=
	$ST	$acc6,$SIZE_T*7($tp)	# tp[6]=
	$STU	$acc7,$SIZE_T*8($tp)	# tp[7]=
	add	$acc3,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc2,$acc2,$t1
	bdnz	.Lsqr4x_shift_n_add
___
my ($np,$np_end)=($ap,$ap_end);
$code.=<<___;
	$POP	$np,$SIZE_T*7($sp)	# pull &np[-1] and n0
	$POP	$n0,$SIZE_T*8($sp)

	$UMULL	$a4,$a5,$a5
	$UMULH	$a5,$a5,$a5
	$ST	$acc0,$SIZE_T*1($tp)	# tp[8]=
	$LD	$acc0,$SIZE_T*12($sp)	# =tp[0]
	$LD	$t1,$SIZE_T*6($tp)	# =tp[13]
	adde	$acc2,$acc2,$a2
	add	$acc4,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc3,$acc3,$t2
	$LD	$t2,$SIZE_T*7($tp)	# =tp[14]
	adde	$acc3,$acc3,$a3
	add	$acc5,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc4,$acc4,$t3
	$UMULL	$a6,$a7,$a7
	$UMULH	$a7,$a7,$a7
	adde	$acc4,$acc4,$a4
	add	$acc6,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc5,$acc5,$t0
	$ST	$acc1,$SIZE_T*2($tp)	# tp[9]=
	$LD	$acc1,$SIZE_T*13($sp)	# =tp[1]
	adde	$acc5,$acc5,$a5
	or	$acc6,$acc6,$t1
	$LD	$a0,$SIZE_T*1($np)
	$LD	$a1,$SIZE_T*2($np)
	adde	$acc6,$acc6,$a6
	$LD	$a2,$SIZE_T*3($np)
	$LD	$a3,$SIZE_T*4($np)
	adde	$acc7,$a7,$t2
	$LD	$a4,$SIZE_T*5($np)
	$LD	$a5,$SIZE_T*6($np)

	################################################################
	# Reduce by 8 limbs per iteration
	$UMULL	$na0,$n0,$acc0		# t[0]*n0
	li	$cnt,8
	$LD	$a6,$SIZE_T*7($np)
	add	$np_end,$np,$num
	$LDU	$a7,$SIZE_T*8($np)
	$ST	$acc2,$SIZE_T*3($tp)	# tp[10]=
	$LD	$acc2,$SIZE_T*14($sp)
	$ST	$acc3,$SIZE_T*4($tp)	# tp[11]=
	$LD	$acc3,$SIZE_T*15($sp)
	$ST	$acc4,$SIZE_T*5($tp)	# tp[12]=
	$LD	$acc4,$SIZE_T*16($sp)
	$ST	$acc5,$SIZE_T*6($tp)	# tp[13]=
	$LD	$acc5,$SIZE_T*17($sp)
	$ST	$acc6,$SIZE_T*7($tp)	# tp[14]=
	$LD	$acc6,$SIZE_T*18($sp)
	$ST	$acc7,$SIZE_T*8($tp)	# tp[15]=
	$LD	$acc7,$SIZE_T*19($sp)
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	mtctr	$cnt
	b	.Lsqr8x_reduction

.align	5
.Lsqr8x_reduction:
	# (*)	$UMULL	$t0,$a0,$na0	# lo(n[0-7])*lo(t[0]*n0)
	$UMULL	$t1,$a1,$na0
	$UMULL	$t2,$a2,$na0
	$STU	$na0,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	$UMULL	$t3,$a3,$na0
	# (*)	addc	$acc0,$acc0,$t0
	addic	$acc0,$acc0,-1		# (*)
	$UMULL	$t0,$a4,$na0
	adde	$acc0,$acc1,$t1
	$UMULL	$t1,$a5,$na0
	adde	$acc1,$acc2,$t2
	$UMULL	$t2,$a6,$na0
	adde	$acc2,$acc3,$t3
	$UMULL	$t3,$a7,$na0
	adde	$acc3,$acc4,$t0
	$UMULH	$t0,$a0,$na0		# hi(n[0-7])*lo(t[0]*n0)
	adde	$acc4,$acc5,$t1
	$UMULH	$t1,$a1,$na0
	adde	$acc5,$acc6,$t2
	$UMULH	$t2,$a2,$na0
	adde	$acc6,$acc7,$t3
	$UMULH	$t3,$a3,$na0
	addze	$acc7,$zero
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a4,$na0
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a5,$na0
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a6,$na0
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a7,$na0
	$UMULL	$na0,$n0,$acc0		# next t[0]*n0
	adde	$acc4,$acc4,$t0
	adde	$acc5,$acc5,$t1
	adde	$acc6,$acc6,$t2
	adde	$acc7,$acc7,$t3
	bdnz	.Lsqr8x_reduction

	$LD	$t0,$SIZE_T*1($tp)
	$LD	$t1,$SIZE_T*2($tp)
	$LD	$t2,$SIZE_T*3($tp)
	$LD	$t3,$SIZE_T*4($tp)
	subi	$rp,$tp,$SIZE_T*7
	$UCMP	$np_end,$np		# done yet?
	addc	$acc0,$acc0,$t0
	$LD	$t0,$SIZE_T*5($tp)
	adde	$acc1,$acc1,$t1
	$LD	$t1,$SIZE_T*6($tp)
	adde	$acc2,$acc2,$t2
	$LD	$t2,$SIZE_T*7($tp)
	adde	$acc3,$acc3,$t3
	$LD	$t3,$SIZE_T*8($tp)
	adde	$acc4,$acc4,$t0
	adde	$acc5,$acc5,$t1
	adde	$acc6,$acc6,$t2
	adde	$acc7,$acc7,$t3
	#addze	$carry,$zero		# moved below
	beq	.Lsqr8x8_post_condition

	$LD	$n0,$SIZE_T*0($rp)
	$LD	$a0,$SIZE_T*1($np)
	$LD	$a1,$SIZE_T*2($np)
	$LD	$a2,$SIZE_T*3($np)
	$LD	$a3,$SIZE_T*4($np)
	$LD	$a4,$SIZE_T*5($np)
	$LD	$a5,$SIZE_T*6($np)
	$LD	$a6,$SIZE_T*7($np)
	$LDU	$a7,$SIZE_T*8($np)
	li	$cnt,0

.align	5
.Lsqr8x_tail:
	$UMULL	$t0,$a0,$n0
	addze	$carry,$zero		# carry bit, modulo-scheduled
	$UMULL	$t1,$a1,$n0
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$n0
	andi.	$cnt,$cnt,$SIZE_T*8-1
	$UMULL	$t3,$a3,$n0
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a4,$n0
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a5,$n0
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a6,$n0
	adde	$acc3,$acc3,$t3
	$UMULL	$t3,$a7,$n0
	adde	$acc4,$acc4,$t0
	$UMULH	$t0,$a0,$n0
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a1,$n0
	adde	$acc6,$acc6,$t2
	$UMULH	$t2,$a2,$n0
	adde	$acc7,$acc7,$t3
	$UMULH	$t3,$a3,$n0
	addze	$carry,$carry
	$STU	$acc0,$SIZE_T($tp)
	addc	$acc0,$acc1,$t0
	$UMULH	$t0,$a4,$n0
	adde	$acc1,$acc2,$t1
	$UMULH	$t1,$a5,$n0
	adde	$acc2,$acc3,$t2
	$UMULH	$t2,$a6,$n0
	adde	$acc3,$acc4,$t3
	$UMULH	$t3,$a7,$n0
	$LDX	$n0,$rp,$cnt
	adde	$acc4,$acc5,$t0
	adde	$acc5,$acc6,$t1
	adde	$acc6,$acc7,$t2
	adde	$acc7,$carry,$t3
	#addze	$carry,$zero		# moved above
	bne	.Lsqr8x_tail
					# note that carry flag is guaranteed
					# to be zero at this point
	$LD	$a0,$SIZE_T*1($tp)
	$POP	$carry,$SIZE_T*10($sp)	# pull top-most carry in case we break
	$UCMP	$np_end,$np		# done yet?
	$LD	$a1,$SIZE_T*2($tp)
	sub	$t2,$np_end,$num	# rewound np
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	beq	.Lsqr8x_tail_break

	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($np)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($np)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($np)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($np)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($np)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($np)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($np)
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*8($np)
	#addze	$carry,$zero		# moved above
	b	.Lsqr8x_tail
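	# Break out of the tail loop: absorb the top-most carry saved on
	# the previous pass, start the next Montgomery factor, and slide
	# the t[] window down by eight limbs before re-entering the
	# reduction loop (unless we already hit the bottom of t[]).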
.align	5
.Lsqr8x_tail_break:
	$POP	$n0,$SIZE_T*8($sp)	# pull n0
	$POP	$t3,$SIZE_T*9($sp)	# &tp[2*num-1]
	addi	$cnt,$tp,$SIZE_T*8	# end of current t[num] window
	addic	$carry,$carry,-1	# "move" top-most carry to carry bit
	adde	$t0,$acc0,$a0
	$LD	$acc0,$SIZE_T*8($rp)
	$LD	$a0,$SIZE_T*1($t2)	# recall that $t2 is &n[-1]
	adde	$t1,$acc1,$a1
	$LD	$acc1,$SIZE_T*9($rp)
	$LD	$a1,$SIZE_T*2($t2)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($t2)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($t2)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($t2)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($t2)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($t2)
	adde	$acc7,$acc7,$a7
	$LD	$a7,$SIZE_T*8($t2)
	addi	$np,$t2,$SIZE_T*8
	addze	$t2,$zero		# top-most carry
	$UMULL	$na0,$n0,$acc0
	$ST	$t0,$SIZE_T*1($tp)
	$UCMP	$cnt,$t3		# did we hit the bottom?
	$ST	$t1,$SIZE_T*2($tp)
	li	$cnt,8
	$ST	$acc2,$SIZE_T*3($tp)
	$LD	$acc2,$SIZE_T*10($rp)
	$ST	$acc3,$SIZE_T*4($tp)
	$LD	$acc3,$SIZE_T*11($rp)
	$ST	$acc4,$SIZE_T*5($tp)
	$LD	$acc4,$SIZE_T*12($rp)
	$ST	$acc5,$SIZE_T*6($tp)
	$LD	$acc5,$SIZE_T*13($rp)
	$ST	$acc6,$SIZE_T*7($tp)
	$LD	$acc6,$SIZE_T*14($rp)
	$ST	$acc7,$SIZE_T*8($tp)
	$LD	$acc7,$SIZE_T*15($rp)
	$PUSH	$t2,$SIZE_T*10($sp)	# off-load top-most carry
	addi	$tp,$rp,$SIZE_T*7	# slide the window
	mtctr	$cnt
	bne	.Lsqr8x_reduction
################################################################
# Final step. We see if the result is larger than the modulus,
# and if it is, subtract the modulus. But comparison implies
# subtraction. So we subtract the modulus, see if the subtraction
# borrowed, and conditionally copy the original value back.
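# In C terms, once rp[] = tp[] - np[] has been computed with a final
# borrow flag, the conditional copy amounts to the branchless select
# below (a minimal illustrative sketch with hypothetical names, not
# part of the generated code):
#
#	mask = 0 - (limb_t)borrow;	/* all-ones iff it borrowed */
#	for (i = 0; i < num; i++)	/* constant-time selection  */
#		rp[i] = (tp[i] & mask) | (rp[i] & ~mask);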
	$POP	$rp,$SIZE_T*6($sp)	# pull &rp[-1]
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+3`
	mr	$n0,$tp			# put tp aside
	addi	$tp,$tp,$SIZE_T*8
	subi	$cnt,$cnt,1
	subfc	$t0,$a0,$acc0
	subfe	$t1,$a1,$acc1
	mr	$carry,$t2
	mr	$ap_end,$rp		# $rp copy
	mtctr	$cnt
	b	.Lsqr8x_sub
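	# Subtraction loop: propagate the borrow through all limbs with
	# subfe, interleaving loads of the next eight limbs and stores
	# of the previous eight to hide memory latency.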
.align	5
.Lsqr8x_sub:
	$LD	$a0,$SIZE_T*1($np)
	$LD	$acc0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($np)
	$LD	$acc1,$SIZE_T*2($tp)
	subfe	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*3($np)
	$LD	$acc2,$SIZE_T*3($tp)
	subfe	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*4($np)
	$LD	$acc3,$SIZE_T*4($tp)
	$ST	$t0,$SIZE_T*1($rp)
	subfe	$t0,$a4,$acc4
	$LD	$a4,$SIZE_T*5($np)
	$LD	$acc4,$SIZE_T*5($tp)
	$ST	$t1,$SIZE_T*2($rp)
	subfe	$t1,$a5,$acc5
	$LD	$a5,$SIZE_T*6($np)
	$LD	$acc5,$SIZE_T*6($tp)
	$ST	$t2,$SIZE_T*3($rp)
	subfe	$t2,$a6,$acc6
	$LD	$a6,$SIZE_T*7($np)
	$LD	$acc6,$SIZE_T*7($tp)
	$ST	$t3,$SIZE_T*4($rp)
	subfe	$t3,$a7,$acc7
	$LDU	$a7,$SIZE_T*8($np)
	$LDU	$acc7,$SIZE_T*8($tp)
	$ST	$t0,$SIZE_T*5($rp)
	subfe	$t0,$a0,$acc0
	$ST	$t1,$SIZE_T*6($rp)
	subfe	$t1,$a1,$acc1
	$ST	$t2,$SIZE_T*7($rp)
	$STU	$t3,$SIZE_T*8($rp)
	bdnz	.Lsqr8x_sub

	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	$LD	$a0,$SIZE_T*1($ap_end)	# original $rp
	$LD	$acc0,$SIZE_T*1($n0)	# original $tp
	subi	$cnt,$cnt,1
	$LD	$a1,$SIZE_T*2($ap_end)
	$LD	$acc1,$SIZE_T*2($n0)
	subfe	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*3($ap_end)
	$LD	$acc2,$SIZE_T*3($n0)
	subfe	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*4($ap_end)
	$LDU	$acc3,$SIZE_T*4($n0)
	$ST	$t0,$SIZE_T*1($rp)
	subfe	$t0,$a4,$acc4
	$ST	$t1,$SIZE_T*2($rp)
	subfe	$t1,$a5,$acc5
	$ST	$t2,$SIZE_T*3($rp)
	subfe	$t2,$a6,$acc6
	$ST	$t3,$SIZE_T*4($rp)
	subfe	$t3,$a7,$acc7
	$ST	$t0,$SIZE_T*5($rp)
	subfe	$carry,$zero,$carry	# did it borrow?
	$ST	$t1,$SIZE_T*6($rp)
	$ST	$t2,$SIZE_T*7($rp)
	$ST	$t3,$SIZE_T*8($rp)
	addi	$tp,$sp,$SIZE_T*11
	mtctr	$cnt
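	# Conditional copy: $carry is now an all-ones/all-zeros mask.
	# andc keeps the subtracted limb when the mask is zero, "and"
	# keeps the original t[] limb when it is all-ones, and "or"
	# merges the two.  The stack copy of t[] is wiped as we go.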
.Lsqr4x_cond_copy:
	andc	$a0,$a0,$carry
	$ST	$zero,-$SIZE_T*3($n0)	# wipe stack clean
	and	$acc0,$acc0,$carry
	$ST	$zero,-$SIZE_T*2($n0)
	andc	$a1,$a1,$carry
	$ST	$zero,-$SIZE_T*1($n0)
	and	$acc1,$acc1,$carry
	$ST	$zero,-$SIZE_T*0($n0)
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$acc2,$acc2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	and	$acc3,$acc3,$carry
	$STU	$zero,$SIZE_T*4($tp)
	or	$t0,$a0,$acc0
	$LD	$a0,$SIZE_T*5($ap_end)
	$LD	$acc0,$SIZE_T*1($n0)
	or	$t1,$a1,$acc1
	$LD	$a1,$SIZE_T*6($ap_end)
	$LD	$acc1,$SIZE_T*2($n0)
	or	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*7($ap_end)
	$LD	$acc2,$SIZE_T*3($n0)
	or	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*8($ap_end)
	$LDU	$acc3,$SIZE_T*4($n0)
	$ST	$t0,$SIZE_T*1($ap_end)
	$ST	$t1,$SIZE_T*2($ap_end)
	$ST	$t2,$SIZE_T*3($ap_end)
	$STU	$t3,$SIZE_T*4($ap_end)
	bdnz	.Lsqr4x_cond_copy

	$POP	$ap,0($sp)		# pull saved sp
	andc	$a0,$a0,$carry
	and	$acc0,$acc0,$carry
	andc	$a1,$a1,$carry
	and	$acc1,$acc1,$carry
	andc	$a2,$a2,$carry
	and	$acc2,$acc2,$carry
	andc	$a3,$a3,$carry
	and	$acc3,$acc3,$carry
	or	$t0,$a0,$acc0
	or	$t1,$a1,$acc1
	or	$t2,$a2,$acc2
	or	$t3,$a3,$acc3
	$ST	$t0,$SIZE_T*1($ap_end)
	$ST	$t1,$SIZE_T*2($ap_end)
	$ST	$t2,$SIZE_T*3($ap_end)
	$ST	$t3,$SIZE_T*4($ap_end)

	b	.Lsqr8x_done
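	# num==8 special case: the whole result already sits in acc0-7,
	# so a single subtract of the modulus followed by a masked
	# add-back replaces the loops above, wiping the stack frame as
	# we go.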
.align	5
.Lsqr8x8_post_condition:
	$POP	$rp,$SIZE_T*6($sp)	# pull rp
	$POP	$ap,0($sp)		# pull saved sp
	addze	$carry,$zero

	# $acc0-7,$carry hold result, $a0-7 hold modulus
	subfc	$acc0,$a0,$acc0
	subfe	$acc1,$a1,$acc1
	$ST	$zero,$SIZE_T*12($sp)	# wipe stack clean
	$ST	$zero,$SIZE_T*13($sp)
	subfe	$acc2,$a2,$acc2
	$ST	$zero,$SIZE_T*14($sp)
	$ST	$zero,$SIZE_T*15($sp)
	subfe	$acc3,$a3,$acc3
	$ST	$zero,$SIZE_T*16($sp)
	$ST	$zero,$SIZE_T*17($sp)
	subfe	$acc4,$a4,$acc4
	$ST	$zero,$SIZE_T*18($sp)
	$ST	$zero,$SIZE_T*19($sp)
	subfe	$acc5,$a5,$acc5
	$ST	$zero,$SIZE_T*20($sp)
	$ST	$zero,$SIZE_T*21($sp)
	subfe	$acc6,$a6,$acc6
	$ST	$zero,$SIZE_T*22($sp)
	$ST	$zero,$SIZE_T*23($sp)
	subfe	$acc7,$a7,$acc7
	$ST	$zero,$SIZE_T*24($sp)
	$ST	$zero,$SIZE_T*25($sp)
	subfe	$carry,$zero,$carry	# did it borrow?
	$ST	$zero,$SIZE_T*26($sp)
	$ST	$zero,$SIZE_T*27($sp)
	and	$a0,$a0,$carry
	and	$a1,$a1,$carry
	addc	$acc0,$acc0,$a0		# add modulus back if borrowed
	and	$a2,$a2,$carry
	adde	$acc1,$acc1,$a1
	and	$a3,$a3,$carry
	adde	$acc2,$acc2,$a2
	and	$a4,$a4,$carry
	adde	$acc3,$acc3,$a3
	and	$a5,$a5,$carry
	adde	$acc4,$acc4,$a4
	and	$a6,$a6,$carry
	adde	$acc5,$acc5,$a5
	and	$a7,$a7,$carry
	adde	$acc6,$acc6,$a6
	adde	$acc7,$acc7,$a7
	$ST	$acc0,$SIZE_T*1($rp)
	$ST	$acc1,$SIZE_T*2($rp)
	$ST	$acc2,$SIZE_T*3($rp)
	$ST	$acc3,$SIZE_T*4($rp)
	$ST	$acc4,$SIZE_T*5($rp)
	$ST	$acc5,$SIZE_T*6($rp)
	$ST	$acc6,$SIZE_T*7($rp)
	$ST	$acc7,$SIZE_T*8($rp)
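	# Epilogue: zero the two scratch slots that held n0 and the
	# top-most carry, restore the non-volatile registers r14-r31
	# from the saved stack pointer, and return 1 to signal success.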
.Lsqr8x_done:
	$PUSH	$zero,$SIZE_T*8($sp)
	$PUSH	$zero,$SIZE_T*10($sp)

	$POP	r14,-$SIZE_T*18($ap)
	li	r3,1			# signal "done"
	$POP	r15,-$SIZE_T*17($ap)
	$POP	r16,-$SIZE_T*16($ap)
	$POP	r17,-$SIZE_T*15($ap)
	$POP	r18,-$SIZE_T*14($ap)
	$POP	r19,-$SIZE_T*13($ap)
	$POP	r20,-$SIZE_T*12($ap)
	$POP	r21,-$SIZE_T*11($ap)
	$POP	r22,-$SIZE_T*10($ap)
	$POP	r23,-$SIZE_T*9($ap)
	$POP	r24,-$SIZE_T*8($ap)
	$POP	r25,-$SIZE_T*7($ap)
	$POP	r26,-$SIZE_T*6($ap)
	$POP	r27,-$SIZE_T*5($ap)
	$POP	r28,-$SIZE_T*4($ap)
	$POP	r29,-$SIZE_T*3($ap)
	$POP	r30,-$SIZE_T*2($ap)
	$POP	r31,-$SIZE_T*1($ap)
	mr	$sp,$ap
	blr
	.long	0
	.byte	0,12,4,0x20,0x80,18,6,0
	.long	0
.size	__bn_sqr8x_mont,.-__bn_sqr8x_mont
___
}
$code.=<<___;
.asciz	"Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT or die "error closing STDOUT: $!";