#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# December 2007
#
# The reason for this undertaking is basically the following. Even though
# the Power 6 CPU operates at an incredible 4.7GHz clock frequency, its
# PKI performance was observed to be less than impressive, essentially as
# fast as a 1.8GHz PPC970, or 2.6 times(!) slower than one would hope.
# Well, it's not surprising that IBM had to make some sacrifices to
# boost the clock frequency that much, but no overall improvement?
# Having observed how much difference switching to the FPU made on
# UltraSPARC, playing the same stunt on Power 6 appeared appropriate...
# Unfortunately the resulting performance improvement is not as
# impressive, ~30%, and in absolute terms it is still very far from what
# one would expect from a 4.7GHz CPU. There is a chance that I'm doing
# something wrong, but in the absence of assembler-level micro-profiling
# data, or at least a decent platform guide, I can't tell... Better
# results might also be achieved with VMX... In any case, this module
# provides *worse* performance on other PowerPC implementations: ~15-40%
# slower on PPC970 depending on key length, and ~40% slower on Power 5
# for all key lengths. As it's obviously inappropriate as a "best
# all-round" alternative, it has to be complemented with run-time CPU
# family detection. Oh! It should also be noted that, unlike on other
# PowerPC implementations, the IALU ppc-mont.pl module performs
# *suboptimally* on >=1024-bit key lengths on Power 6. It should further
# be noted that *everything* said so far applies to 64-bit builds! As
# far as 32-bit applications executed on a 64-bit CPU go, this module is
# likely to become the preferred choice, because it's easy to adapt for
# that case and *is* faster than 32-bit ppc-mont.pl on *all* processors.
#
# February 2008
#
# Micro-profiling-assisted optimization results in a ~15% improvement
# over the original ppc64-mont.pl version, or an overall ~50% improvement
# over the ppc.pl module on Power 6. Compared to ppc-mont.pl on the same
# Power 6 CPU, this module is 5-150% faster depending on key length,
# [hereafter] more for longer keys. Compared to ppc-mont.pl on a 1.8GHz
# PPC970, however, it's only 5-55% faster. Still far from impressive
# in absolute terms, but that's apparently the way Power 6 is...
#
# December 2009
#
# Adapted for a 32-bit build, this module delivers a 25-120% performance
# improvement (yes, more than *twice* as fast for longer keys) over
# 32-bit ppc-mont.pl on a 1.8GHz PPC970. However! This implementation
# utilizes even 64-bit integer operations, and the trouble is that most
# PPC operating systems don't preserve the upper halves of general-
# purpose registers upon 32-bit signal delivery. They do preserve them
# upon context switch, but not upon signalling:-( This means that
# asynchronous signals have to be blocked upon entry to this subroutine.
# Signal masking (and of course the complementary unmasking) has quite
# an impact on performance, naturally larger for shorter keys. It's so
# severe that 512-bit key performance can be as low as 1/3 of the
# expected one. This is why this routine is engaged for longer-key
# operations only on such OSes; see crypto/ppccap.c for further details.
# Mac OS X is an exception here and doesn't require signal masking, and
# that's where the above improvement coefficients were collected. For
# the others, the alternative would be to break the dependence on the
# upper halves of the GPRs by sticking to 32-bit integer operations...
#
# December 2012
#
# Remove the above-mentioned dependence on the GPRs' upper halves in the
# 32-bit build. There is no signal-masking overhead, but integer
# instructions are *more* numerous... It's still "universally" faster
# than 32-bit ppc-mont.pl, but the improvement coefficient is not as
# impressive for longer keys...
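#
# The core trick this module banks on, shown in plain Perl as an
# illustrative sketch only (not part of the build, assumes 64-bit perl
# integers): split operands into 16-bit limbs so that every partial
# product and running sum fits exactly in the 53-bit mantissa of an IEEE
# double, do the multiply-accumulate in FP, then propagate carries back
# in the integer domain. With 32-bit inputs for brevity:
#
#	sub mul_via_fp {
#	    my ($a, $b) = @_;
#	    my @al = ($a & 0xffff, $a >> 16);	# 16-bit limbs, low first
#	    my @bl = ($b & 0xffff, $b >> 16);
#	    my @acc = (0, 0, 0);		# sums stay far below 2^53,
#	    for my $i (0 .. 1) {		# so FP accumulation is exact
#	        for my $j (0 .. 1) {
#	            $acc[$i + $j] += $al[$i] * $bl[$j];
#	        }
#	    }
#	    my ($r, $carry) = (0, 0);		# integer-domain carry pass
#	    for my $k (0 .. 2) {
#	        $carry += $acc[$k];
#	        $r |= ($carry & 0xffff) << (16 * $k);
#	        $carry >>= 16;
#	    }
#	    return $r | ($carry << 48);		# full 64-bit product
#	}
#
# The code below does the same with 64-bit words, four 16-bit limbs per
# bp word, and fmadd doing the accumulation.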

$flavour = shift;

if ($flavour =~ /32/) {
	$SIZE_T=4;
	$RZONE=	224;
	$fname=	"bn_mul_mont_fpu64";

	$STUX=	"stwux";	# store indexed and update
	$PUSH=	"stw";
	$POP=	"lwz";
} elsif ($flavour =~ /64/) {
	$SIZE_T=8;
	$RZONE=	288;
	$fname=	"bn_mul_mont_fpu64";

	# same as above, but 64-bit mnemonics...
	$STUX=	"stdux";	# store indexed and update
	$PUSH=	"std";
	$POP=	"ld";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 4 : 0;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";

$FRAME=64;	# padded frame header
$TRANSFER=16*8;

$carry="r0";
$sp="r1";
$toc="r2";
$rp="r3";	$ovf="r3";
$ap="r4";
$bp="r5";
$np="r6";
$n0="r7";
$num="r8";
$rp="r9";	# $rp is reassigned
$tp="r10";
$j="r11";
$i="r12";
# non-volatile registers
$c1="r19";
$n1="r20";
$a1="r21";
$nap_d="r22";	# interleaved ap and np in double format
$a0="r23";	# ap[0]
$t0="r24";	# temporary registers
$t1="r25";
$t2="r26";
$t3="r27";
$t4="r28";
$t5="r29";
$t6="r30";
$t7="r31";

# PPC offers enough register bank capacity to unroll inner loops twice
#
#       ..A3A2A1A0
#             dcba
#      -----------
#              A0a
#             A0b
#            A0c
#           A0d
#             A1a
#            A1b
#           A1c
#          A1d
#            A2a
#           A2b
#          A2c
#         A2d
#           A3a
#          A3b
#         A3c
#        A3d
#       ..a
#      ..b
#
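# That is, each inner-loop iteration multiplies the four 32-bit halves
# A0..A3 (one unrolled pair of 64-bit words) by the four 16-bit limbs
# a..d of the current b (respectively m=tp[0]*n0) word: 16 partial
# products per operand vector, all accumulated with fmadd, with the two
# topmost terms (..a/..b, i.e. dota/dotb) spilling into the next
# iteration.
#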
$ba="f0";	$bb="f1";	$bc="f2";	$bd="f3";
$na="f4";	$nb="f5";	$nc="f6";	$nd="f7";
$dota="f8";	$dotb="f9";
$A0="f10";	$A1="f11";	$A2="f12";	$A3="f13";
$N0="f20";	$N1="f21";	$N2="f22";	$N3="f23";
$T0a="f24";	$T0b="f25";
$T1a="f26";	$T1b="f27";
$T2a="f28";	$T2b="f29";
$T3a="f30";	$T3b="f31";

# sp----------->+-------------------------------+
#               | saved sp                      |
#               +-------------------------------+
#               .                               .
# +64           +-------------------------------+
#               | 16 gpr<->fpr transfer zone    |
#               .                               .
#               .                               .
# +16*8         +-------------------------------+
#               | __int64 tmp[-1]               |
#               +-------------------------------+
#               | __int64 tmp[num]              |
#               .                               .
#               .                               .
#               .                               .
# +(num+1)*8    +-------------------------------+
#               | padding to 64 byte boundary   |
#               .                               .
# +X            +-------------------------------+
#               | double nap_d[4*num]           |
#               .                               .
#               .                               .
#               .                               .
#               +-------------------------------+
#               .                               .
# -13*size_t    +-------------------------------+
#               | 13 saved gpr, r19-r31         |
#               .                               .
#               .                               .
# -12*8         +-------------------------------+
#               | 12 saved fpr, f20-f31         |
#               .                               .
#               .                               .
#               +-------------------------------+
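#
# Reading of the allocation code below (a summary, not from the original
# source): the alloca sizes this as 4*num bytes for nap_d plus num bytes
# for tp (num is byte-scaled by then), plus $FRAME+$TRANSFER+8+64+$RZONE
# for the header, tp[-1], alignment slack and the ABI red zone, and then
# rounds the new stack pointer down to a 4KB boundary to minimize TLB
# usage.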

$code=<<___;
.machine "any"
.text

.globl	.$fname
.align	5
.$fname:
	cmpwi	$num,`3*8/$SIZE_T`
	mr	$rp,r3		; $rp is reassigned
	li	r3,0		; possible "not handled" return code
	bltlr-
	andi.	r0,$num,`16/$SIZE_T-1`	; $num has to be "even"
	bnelr-

	slwi	$num,$num,`log($SIZE_T)/log(2)`	; num*=sizeof(BN_LONG)
	li	$i,-4096
	slwi	$tp,$num,2	; place for {an}p_{lh}[num], i.e. 4*num
	add	$tp,$tp,$num	; place for tp[num+1]
	addi	$tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
	subf	$tp,$tp,$sp	; $sp-$tp
	and	$tp,$tp,$i	; minimize TLB usage
	subf	$tp,$sp,$tp	; $tp-$sp
	mr	$i,$sp
	$STUX	$sp,$sp,$tp	; alloca

	$PUSH	r19,`-12*8-13*$SIZE_T`($i)
	$PUSH	r20,`-12*8-12*$SIZE_T`($i)
	$PUSH	r21,`-12*8-11*$SIZE_T`($i)
	$PUSH	r22,`-12*8-10*$SIZE_T`($i)
	$PUSH	r23,`-12*8-9*$SIZE_T`($i)
	$PUSH	r24,`-12*8-8*$SIZE_T`($i)
	$PUSH	r25,`-12*8-7*$SIZE_T`($i)
	$PUSH	r26,`-12*8-6*$SIZE_T`($i)
	$PUSH	r27,`-12*8-5*$SIZE_T`($i)
	$PUSH	r28,`-12*8-4*$SIZE_T`($i)
	$PUSH	r29,`-12*8-3*$SIZE_T`($i)
	$PUSH	r30,`-12*8-2*$SIZE_T`($i)
	$PUSH	r31,`-12*8-1*$SIZE_T`($i)
	stfd	f20,`-12*8`($i)
	stfd	f21,`-11*8`($i)
	stfd	f22,`-10*8`($i)
	stfd	f23,`-9*8`($i)
	stfd	f24,`-8*8`($i)
	stfd	f25,`-7*8`($i)
	stfd	f26,`-6*8`($i)
	stfd	f27,`-5*8`($i)
	stfd	f28,`-4*8`($i)
	stfd	f29,`-3*8`($i)
	stfd	f30,`-2*8`($i)
	stfd	f31,`-1*8`($i)

	addi	$tp,$sp,`$FRAME+$TRANSFER+8+64`
	li	$i,-64
	add	$nap_d,$tp,$num
	and	$nap_d,$nap_d,$i	; align to 64 bytes
	; nap_d is off by 1, because it's used with stfdu/lfdu
	addi	$nap_d,$nap_d,-8
	srwi	$j,$num,`3+1`	; counter register, num/2
	addi	$j,$j,-1
	addi	$tp,$sp,`$FRAME+$TRANSFER-8`
	li	$carry,0
	mtctr	$j
___
$code.=<<___ if ($SIZE_T==8);
	ld	$a0,0($ap)	; pull ap[0] value
	ld	$t3,0($bp)	; bp[0]
	ld	$n0,0($n0)	; pull n0[0] value

	mulld	$t7,$a0,$t3	; ap[0]*bp[0]
	; transfer bp[0] to FPU as 4x16-bit values
	extrdi	$t0,$t3,16,48
	extrdi	$t1,$t3,16,32
	extrdi	$t2,$t3,16,16
	extrdi	$t3,$t3,16,0
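	; illustrative example (not from the original source): with
	; $t3 = 0x0123456789abcdef the four extracts above yield
	; $t0=0xcdef, $t1=0x89ab, $t2=0x4567, $t3=0x0123, least-significant
	; limb first; each value is <2^16 and hence exact as a double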
	std	$t0,`$FRAME+0`($sp)
	std	$t1,`$FRAME+8`($sp)
	std	$t2,`$FRAME+16`($sp)
	std	$t3,`$FRAME+24`($sp)

	mulld	$t7,$t7,$n0	; tp[0]*n0
	; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
	extrdi	$t4,$t7,16,48
	extrdi	$t5,$t7,16,32
	extrdi	$t6,$t7,16,16
	extrdi	$t7,$t7,16,0
	std	$t4,`$FRAME+32`($sp)
	std	$t5,`$FRAME+40`($sp)
	std	$t6,`$FRAME+48`($sp)
	std	$t7,`$FRAME+56`($sp)

	extrdi	$t0,$a0,32,32	; lwz $t0,4($ap)
	extrdi	$t1,$a0,32,0	; lwz $t1,0($ap)
	lwz	$t2,`12^$LITTLE_ENDIAN`($ap)	; load a[1] as 32-bit word pair
	lwz	$t3,`8^$LITTLE_ENDIAN`($ap)
	lwz	$t4,`4^$LITTLE_ENDIAN`($np)	; load n[0] as 32-bit word pair
	lwz	$t5,`0^$LITTLE_ENDIAN`($np)
	lwz	$t6,`12^$LITTLE_ENDIAN`($np)	; load n[1] as 32-bit word pair
	lwz	$t7,`8^$LITTLE_ENDIAN`($np)
___
$code.=<<___ if ($SIZE_T==4);
	lwz	$a0,0($ap)	; pull ap[0,1] value
	mr	$n1,$n0
	lwz	$a1,4($ap)
	li	$c1,0
	lwz	$t1,0($bp)	; bp[0,1]
	lwz	$t3,4($bp)
	lwz	$n0,0($n1)	; pull n0[0,1] value
	lwz	$n1,4($n1)

	mullw	$t4,$a0,$t1	; mulld ap[0]*bp[0]
	mulhwu	$t5,$a0,$t1
	mullw	$t6,$a1,$t1
	mullw	$t7,$a0,$t3
	add	$t5,$t5,$t6
	add	$t5,$t5,$t7
	; transfer bp[0] to FPU as 4x16-bit values
	extrwi	$t0,$t1,16,16
	extrwi	$t1,$t1,16,0
	extrwi	$t2,$t3,16,16
	extrwi	$t3,$t3,16,0
	std	$t0,`$FRAME+0`($sp)	; yes, std in 32-bit build
	std	$t1,`$FRAME+8`($sp)
	std	$t2,`$FRAME+16`($sp)
	std	$t3,`$FRAME+24`($sp)

	mullw	$t0,$t4,$n0	; mulld tp[0]*n0
	mulhwu	$t1,$t4,$n0
	mullw	$t2,$t5,$n0
	mullw	$t3,$t4,$n1
	add	$t1,$t1,$t2
	add	$t1,$t1,$t3
	; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
	extrwi	$t4,$t0,16,16
	extrwi	$t5,$t0,16,0
	extrwi	$t6,$t1,16,16
	extrwi	$t7,$t1,16,0
	std	$t4,`$FRAME+32`($sp)	; yes, std in 32-bit build
	std	$t5,`$FRAME+40`($sp)
	std	$t6,`$FRAME+48`($sp)
	std	$t7,`$FRAME+56`($sp)

	mr	$t0,$a0		; lwz $t0,0($ap)
	mr	$t1,$a1		; lwz $t1,4($ap)
	lwz	$t2,8($ap)	; load a[j..j+3] as 32-bit word pairs
	lwz	$t3,12($ap)
	lwz	$t4,0($np)	; load n[j..j+3] as 32-bit word pairs
	lwz	$t5,4($np)
	lwz	$t6,8($np)
	lwz	$t7,12($np)
___
$code.=<<___;
	lfd	$ba,`$FRAME+0`($sp)
	lfd	$bb,`$FRAME+8`($sp)
	lfd	$bc,`$FRAME+16`($sp)
	lfd	$bd,`$FRAME+24`($sp)
	lfd	$na,`$FRAME+32`($sp)
	lfd	$nb,`$FRAME+40`($sp)
	lfd	$nc,`$FRAME+48`($sp)
	lfd	$nd,`$FRAME+56`($sp)
	std	$t0,`$FRAME+64`($sp)	; yes, std even in 32-bit build
	std	$t1,`$FRAME+72`($sp)
	std	$t2,`$FRAME+80`($sp)
	std	$t3,`$FRAME+88`($sp)
	std	$t4,`$FRAME+96`($sp)
	std	$t5,`$FRAME+104`($sp)
	std	$t6,`$FRAME+112`($sp)
	std	$t7,`$FRAME+120`($sp)
	fcfid	$ba,$ba
	fcfid	$bb,$bb
	fcfid	$bc,$bc
	fcfid	$bd,$bd
	fcfid	$na,$na
	fcfid	$nb,$nb
	fcfid	$nc,$nc
	fcfid	$nd,$nd
	lfd	$A0,`$FRAME+64`($sp)
	lfd	$A1,`$FRAME+72`($sp)
	lfd	$A2,`$FRAME+80`($sp)
	lfd	$A3,`$FRAME+88`($sp)
	lfd	$N0,`$FRAME+96`($sp)
	lfd	$N1,`$FRAME+104`($sp)
	lfd	$N2,`$FRAME+112`($sp)
	lfd	$N3,`$FRAME+120`($sp)
	fcfid	$A0,$A0
	fcfid	$A1,$A1
	fcfid	$A2,$A2
	fcfid	$A3,$A3
	fcfid	$N0,$N0
	fcfid	$N1,$N1
	fcfid	$N2,$N2
	fcfid	$N3,$N3
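	; all integer images converted above are <2^32, so fcfid
	; (64-bit int -> double) introduces no rounding; from here on
	; a[j] and n[j] live in the FPU as pairs of 32-bit halves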
	addi	$ap,$ap,16
	addi	$np,$np,16

	fmul	$T1a,$A1,$ba
	fmul	$T1b,$A1,$bb
	stfd	$A0,8($nap_d)	; save a[j] in double format
	stfd	$A1,16($nap_d)
	fmul	$T2a,$A2,$ba
	fmul	$T2b,$A2,$bb
	stfd	$A2,24($nap_d)	; save a[j+1] in double format
	stfd	$A3,32($nap_d)
	fmul	$T3a,$A3,$ba
	fmul	$T3b,$A3,$bb
	stfd	$N0,40($nap_d)	; save n[j] in double format
	stfd	$N1,48($nap_d)
	fmul	$T0a,$A0,$ba
	fmul	$T0b,$A0,$bb
	stfd	$N2,56($nap_d)	; save n[j+1] in double format
	stfdu	$N3,64($nap_d)
	fmadd	$T1a,$A0,$bc,$T1a
	fmadd	$T1b,$A0,$bd,$T1b
	fmadd	$T2a,$A1,$bc,$T2a
	fmadd	$T2b,$A1,$bd,$T2b
	fmadd	$T3a,$A2,$bc,$T3a
	fmadd	$T3b,$A2,$bd,$T3b
	fmul	$dota,$A3,$bc
	fmul	$dotb,$A3,$bd
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	fctid	$T0a,$T0a
	fctid	$T0b,$T0b
	fctid	$T1a,$T1a
	fctid	$T1b,$T1b
	fctid	$T2a,$T2a
	fctid	$T2b,$T2b
	fctid	$T3a,$T3a
	fctid	$T3b,$T3b
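	; each sum accumulated in $T0-$T3 stays well below 2^53, so the
	; fctid above (double -> 64-bit int) is exact; the low 16 bits of
	; every converted value form a result limb, the upper bits are a
	; carry folded in by the integer code inside the loop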
	stfd	$T0a,`$FRAME+0`($sp)
	stfd	$T0b,`$FRAME+8`($sp)
	stfd	$T1a,`$FRAME+16`($sp)
	stfd	$T1b,`$FRAME+24`($sp)
	stfd	$T2a,`$FRAME+32`($sp)
	stfd	$T2b,`$FRAME+40`($sp)
	stfd	$T3a,`$FRAME+48`($sp)
	stfd	$T3b,`$FRAME+56`($sp)

.align	5
L1st:
___
$code.=<<___ if ($SIZE_T==8);
	lwz	$t0,`4^$LITTLE_ENDIAN`($ap)	; load a[j] as 32-bit word pair
	lwz	$t1,`0^$LITTLE_ENDIAN`($ap)
	lwz	$t2,`12^$LITTLE_ENDIAN`($ap)	; load a[j+1] as 32-bit word pair
	lwz	$t3,`8^$LITTLE_ENDIAN`($ap)
	lwz	$t4,`4^$LITTLE_ENDIAN`($np)	; load n[j] as 32-bit word pair
	lwz	$t5,`0^$LITTLE_ENDIAN`($np)
	lwz	$t6,`12^$LITTLE_ENDIAN`($np)	; load n[j+1] as 32-bit word pair
	lwz	$t7,`8^$LITTLE_ENDIAN`($np)
___
$code.=<<___ if ($SIZE_T==4);
	lwz	$t0,0($ap)	; load a[j..j+3] as 32-bit word pairs
	lwz	$t1,4($ap)
	lwz	$t2,8($ap)
	lwz	$t3,12($ap)
	lwz	$t4,0($np)	; load n[j..j+3] as 32-bit word pairs
	lwz	$t5,4($np)
	lwz	$t6,8($np)
	lwz	$t7,12($np)
___
$code.=<<___;
	std	$t0,`$FRAME+64`($sp)	; yes, std even in 32-bit build
	std	$t1,`$FRAME+72`($sp)
	std	$t2,`$FRAME+80`($sp)
	std	$t3,`$FRAME+88`($sp)
	std	$t4,`$FRAME+96`($sp)
	std	$t5,`$FRAME+104`($sp)
	std	$t6,`$FRAME+112`($sp)
	std	$t7,`$FRAME+120`($sp)
___
if ($SIZE_T==8 or $flavour =~ /osx/) {
$code.=<<___;
	ld	$t0,`$FRAME+0`($sp)
	ld	$t1,`$FRAME+8`($sp)
	ld	$t2,`$FRAME+16`($sp)
	ld	$t3,`$FRAME+24`($sp)
	ld	$t4,`$FRAME+32`($sp)
	ld	$t5,`$FRAME+40`($sp)
	ld	$t6,`$FRAME+48`($sp)
	ld	$t7,`$FRAME+56`($sp)
___
} else {
$code.=<<___;
	lwz	$t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
	lwz	$t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
	lwz	$t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
	lwz	$t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
	lwz	$t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
	lwz	$t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
___
}
$code.=<<___;
	lfd	$A0,`$FRAME+64`($sp)
	lfd	$A1,`$FRAME+72`($sp)
	lfd	$A2,`$FRAME+80`($sp)
	lfd	$A3,`$FRAME+88`($sp)
	lfd	$N0,`$FRAME+96`($sp)
	lfd	$N1,`$FRAME+104`($sp)
	lfd	$N2,`$FRAME+112`($sp)
	lfd	$N3,`$FRAME+120`($sp)
	fcfid	$A0,$A0
	fcfid	$A1,$A1
	fcfid	$A2,$A2
	fcfid	$A3,$A3
	fcfid	$N0,$N0
	fcfid	$N1,$N1
	fcfid	$N2,$N2
	fcfid	$N3,$N3
	addi	$ap,$ap,16
	addi	$np,$np,16

	fmul	$T1a,$A1,$ba
	fmul	$T1b,$A1,$bb
	fmul	$T2a,$A2,$ba
	fmul	$T2b,$A2,$bb
	stfd	$A0,8($nap_d)	; save a[j] in double format
	stfd	$A1,16($nap_d)
	fmul	$T3a,$A3,$ba
	fmul	$T3b,$A3,$bb
	fmadd	$T0a,$A0,$ba,$dota
	fmadd	$T0b,$A0,$bb,$dotb
	stfd	$A2,24($nap_d)	; save a[j+1] in double format
	stfd	$A3,32($nap_d)
___
if ($SIZE_T==8 or $flavour =~ /osx/) {
$code.=<<___;
	fmadd	$T1a,$A0,$bc,$T1a
	fmadd	$T1b,$A0,$bd,$T1b
	fmadd	$T2a,$A1,$bc,$T2a
	fmadd	$T2b,$A1,$bd,$T2b
	stfd	$N0,40($nap_d)	; save n[j] in double format
	stfd	$N1,48($nap_d)
	fmadd	$T3a,$A2,$bc,$T3a
	fmadd	$T3b,$A2,$bd,$T3b
	add	$t0,$t0,$carry	; can not overflow
	fmul	$dota,$A3,$bc
	fmul	$dotb,$A3,$bd
	stfd	$N2,56($nap_d)	; save n[j+1] in double format
	stfdu	$N3,64($nap_d)
	srdi	$carry,$t0,16
	add	$t1,$t1,$carry
	srdi	$carry,$t1,16
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	insrdi	$t0,$t1,16,32
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	add	$t2,$t2,$carry
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	srdi	$carry,$t2,16
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	insrdi	$t0,$t2,16,16
	add	$t3,$t3,$carry
	srdi	$carry,$t3,16
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	insrdi	$t0,$t3,16,0	; 0..63 bits
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	add	$t4,$t4,$carry
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	srdi	$carry,$t4,16
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	add	$t5,$t5,$carry
	srdi	$carry,$t5,16
	insrdi	$t4,$t5,16,32
	fctid	$T0a,$T0a
	fctid	$T0b,$T0b
	add	$t6,$t6,$carry
	fctid	$T1a,$T1a
	fctid	$T1b,$T1b
	srdi	$carry,$t6,16
	fctid	$T2a,$T2a
	fctid	$T2b,$T2b
	insrdi	$t4,$t6,16,16
	fctid	$T3a,$T3a
	fctid	$T3b,$T3b
	add	$t7,$t7,$carry
	insrdi	$t4,$t7,16,0	; 64..127 bits
	srdi	$carry,$t7,16	; upper 33 bits
	stfd	$T0a,`$FRAME+0`($sp)
	stfd	$T0b,`$FRAME+8`($sp)
	stfd	$T1a,`$FRAME+16`($sp)
	stfd	$T1b,`$FRAME+24`($sp)
	stfd	$T2a,`$FRAME+32`($sp)
	stfd	$T2b,`$FRAME+40`($sp)
	stfd	$T3a,`$FRAME+48`($sp)
	stfd	$T3b,`$FRAME+56`($sp)
	std	$t0,8($tp)	; tp[j-1]
	stdu	$t4,16($tp)	; tp[j]
___
} else {
$code.=<<___;
	fmadd	$T1a,$A0,$bc,$T1a
	fmadd	$T1b,$A0,$bd,$T1b
	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	fmadd	$T2a,$A1,$bc,$T2a
	fmadd	$T2b,$A1,$bd,$T2b
	stfd	$N0,40($nap_d)	; save n[j] in double format
	stfd	$N1,48($nap_d)
	srwi	$c1,$t1,16
	insrwi	$carry,$t1,16,0
	fmadd	$T3a,$A2,$bc,$T3a
	fmadd	$T3b,$A2,$bd,$T3b
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	fmul	$dota,$A3,$bc
	fmul	$dotb,$A3,$bd
	stfd	$N2,56($nap_d)	; save n[j+1] in double format
	stfdu	$N3,64($nap_d)
	insrwi	$t0,$t2,16,0	; 0..31 bits
	srwi	$c1,$t3,16
	insrwi	$carry,$t3,16,0
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	lwz	$t3,`$FRAME+32^$LITTLE_ENDIAN`($sp)	; permuted $t1
	lwz	$t2,`$FRAME+36^$LITTLE_ENDIAN`($sp)	; permuted $t0
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	srwi	$c1,$t5,16
	insrwi	$carry,$t5,16,0
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	insrwi	$t4,$t6,16,0	; 32..63 bits
	srwi	$c1,$t7,16
	insrwi	$carry,$t7,16,0
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	lwz	$t7,`$FRAME+40^$LITTLE_ENDIAN`($sp)	; permuted $t3
	lwz	$t6,`$FRAME+44^$LITTLE_ENDIAN`($sp)	; permuted $t2
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	stw	$t0,12($tp)	; tp[j-1]
	stw	$t4,8($tp)
	srwi	$c1,$t3,16
	insrwi	$carry,$t3,16,0
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	lwz	$t1,`$FRAME+48^$LITTLE_ENDIAN`($sp)	; permuted $t5
	lwz	$t0,`$FRAME+52^$LITTLE_ENDIAN`($sp)	; permuted $t4
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	insrwi	$t2,$t6,16,0	; 64..95 bits
	srwi	$c1,$t7,16
	insrwi	$carry,$t7,16,0
	fctid	$T0a,$T0a
	fctid	$T0b,$T0b
	lwz	$t5,`$FRAME+56^$LITTLE_ENDIAN`($sp)	; permuted $t7
	lwz	$t4,`$FRAME+60^$LITTLE_ENDIAN`($sp)	; permuted $t6
	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	fctid	$T1a,$T1a
	fctid	$T1b,$T1b
	srwi	$c1,$t1,16
	insrwi	$carry,$t1,16,0
	fctid	$T2a,$T2a
	fctid	$T2b,$T2b
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	fctid	$T3a,$T3a
	fctid	$T3b,$T3b
	insrwi	$t0,$t4,16,0	; 96..127 bits
	srwi	$c1,$t5,16
	insrwi	$carry,$t5,16,0
	stfd	$T0a,`$FRAME+0`($sp)
	stfd	$T0b,`$FRAME+8`($sp)
	stfd	$T1a,`$FRAME+16`($sp)
	stfd	$T1b,`$FRAME+24`($sp)
	stfd	$T2a,`$FRAME+32`($sp)
	stfd	$T2b,`$FRAME+40`($sp)
	stfd	$T3a,`$FRAME+48`($sp)
	stfd	$T3b,`$FRAME+56`($sp)
	stw	$t2,20($tp)	; tp[j]
	stwu	$t0,16($tp)
___
}
$code.=<<___;
	bdnz-	L1st

	fctid	$dota,$dota
	fctid	$dotb,$dotb
___
if ($SIZE_T==8 or $flavour =~ /osx/) {
$code.=<<___;
	ld	$t0,`$FRAME+0`($sp)
	ld	$t1,`$FRAME+8`($sp)
	ld	$t2,`$FRAME+16`($sp)
	ld	$t3,`$FRAME+24`($sp)
	ld	$t4,`$FRAME+32`($sp)
	ld	$t5,`$FRAME+40`($sp)
	ld	$t6,`$FRAME+48`($sp)
	ld	$t7,`$FRAME+56`($sp)
	stfd	$dota,`$FRAME+64`($sp)
	stfd	$dotb,`$FRAME+72`($sp)

	add	$t0,$t0,$carry	; can not overflow
	srdi	$carry,$t0,16
	add	$t1,$t1,$carry
	srdi	$carry,$t1,16
	insrdi	$t0,$t1,16,32
	add	$t2,$t2,$carry
	srdi	$carry,$t2,16
	insrdi	$t0,$t2,16,16
	add	$t3,$t3,$carry
	srdi	$carry,$t3,16
	insrdi	$t0,$t3,16,0	; 0..63 bits
	add	$t4,$t4,$carry
	srdi	$carry,$t4,16
	add	$t5,$t5,$carry
	srdi	$carry,$t5,16
	insrdi	$t4,$t5,16,32
	add	$t6,$t6,$carry
	srdi	$carry,$t6,16
	insrdi	$t4,$t6,16,16
	add	$t7,$t7,$carry
	insrdi	$t4,$t7,16,0	; 64..127 bits
	srdi	$carry,$t7,16	; upper 33 bits
	ld	$t6,`$FRAME+64`($sp)
	ld	$t7,`$FRAME+72`($sp)

	std	$t0,8($tp)	; tp[j-1]
	stdu	$t4,16($tp)	; tp[j]

	add	$t6,$t6,$carry	; can not overflow
	srdi	$carry,$t6,16
	add	$t7,$t7,$carry
	insrdi	$t6,$t7,48,0
	srdi	$ovf,$t7,48
	std	$t6,8($tp)	; tp[num-1]
___
} else {
$code.=<<___;
	lwz	$t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
	lwz	$t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
	lwz	$t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
	lwz	$t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
	lwz	$t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
	lwz	$t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
	stfd	$dota,`$FRAME+64`($sp)
	stfd	$dotb,`$FRAME+72`($sp)

	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	insrwi	$carry,$t1,16,0
	srwi	$c1,$t1,16
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	insrwi	$t0,$t2,16,0	; 0..31 bits
	insrwi	$carry,$t3,16,0
	srwi	$c1,$t3,16
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	insrwi	$carry,$t5,16,0
	srwi	$c1,$t5,16
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	insrwi	$t4,$t6,16,0	; 32..63 bits
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16

	stw	$t0,12($tp)	; tp[j-1]
	stw	$t4,8($tp)

	lwz	$t3,`$FRAME+32^$LITTLE_ENDIAN`($sp)	; permuted $t1
	lwz	$t2,`$FRAME+36^$LITTLE_ENDIAN`($sp)	; permuted $t0
	lwz	$t7,`$FRAME+40^$LITTLE_ENDIAN`($sp)	; permuted $t3
	lwz	$t6,`$FRAME+44^$LITTLE_ENDIAN`($sp)	; permuted $t2
	lwz	$t1,`$FRAME+48^$LITTLE_ENDIAN`($sp)	; permuted $t5
	lwz	$t0,`$FRAME+52^$LITTLE_ENDIAN`($sp)	; permuted $t4
	lwz	$t5,`$FRAME+56^$LITTLE_ENDIAN`($sp)	; permuted $t7
	lwz	$t4,`$FRAME+60^$LITTLE_ENDIAN`($sp)	; permuted $t6

	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	insrwi	$carry,$t3,16,0
	srwi	$c1,$t3,16
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	insrwi	$t2,$t6,16,0	; 64..95 bits
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16
	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	insrwi	$carry,$t1,16,0
	srwi	$c1,$t1,16
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	insrwi	$t0,$t4,16,0	; 96..127 bits
	insrwi	$carry,$t5,16,0
	srwi	$c1,$t5,16

	stw	$t2,20($tp)	; tp[j]
	stwu	$t0,16($tp)

	lwz	$t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
	lwz	$t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)

	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1

	insrwi	$t6,$t4,16,0
	srwi	$t4,$t4,16
	insrwi	$t4,$t5,16,0
	srwi	$ovf,$t5,16

	stw	$t6,12($tp)	; tp[num-1]
	stw	$t4,8($tp)
___
}
$code.=<<___;
	slwi	$t7,$num,2
	subf	$nap_d,$t7,$nap_d	; rewind pointer

	li	$i,8	; i=1
.align	5
Louter:
	addi	$tp,$sp,`$FRAME+$TRANSFER`
	li	$carry,0
	mtctr	$j
___
$code.=<<___ if ($SIZE_T==8);
	ldx	$t3,$bp,$i	; bp[i]
	ld	$t6,`$FRAME+$TRANSFER+8`($sp)	; tp[0]
	mulld	$t7,$a0,$t3	; ap[0]*bp[i]
	add	$t7,$t7,$t6	; ap[0]*bp[i]+tp[0]
	; transfer bp[i] to FPU as 4x16-bit values
	extrdi	$t0,$t3,16,48
	extrdi	$t1,$t3,16,32
	extrdi	$t2,$t3,16,16
	extrdi	$t3,$t3,16,0
	std	$t0,`$FRAME+0`($sp)
	std	$t1,`$FRAME+8`($sp)
	std	$t2,`$FRAME+16`($sp)
	std	$t3,`$FRAME+24`($sp)

	mulld	$t7,$t7,$n0	; tp[0]*n0
	; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
	extrdi	$t4,$t7,16,48
	extrdi	$t5,$t7,16,32
	extrdi	$t6,$t7,16,16
	extrdi	$t7,$t7,16,0
	std	$t4,`$FRAME+32`($sp)
	std	$t5,`$FRAME+40`($sp)
	std	$t6,`$FRAME+48`($sp)
	std	$t7,`$FRAME+56`($sp)
___
$code.=<<___ if ($SIZE_T==4);
	add	$t0,$bp,$i
	li	$c1,0
	lwz	$t1,0($t0)	; bp[i,i+1]
	lwz	$t3,4($t0)

	mullw	$t4,$a0,$t1	; ap[0]*bp[i]
	lwz	$t0,`$FRAME+$TRANSFER+8+4`($sp)	; tp[0]
	mulhwu	$t5,$a0,$t1
	lwz	$t2,`$FRAME+$TRANSFER+8`($sp)	; tp[0]
	mullw	$t6,$a1,$t1
	mullw	$t7,$a0,$t3
	add	$t5,$t5,$t6
	add	$t5,$t5,$t7
	addc	$t4,$t4,$t0	; ap[0]*bp[i]+tp[0]
	adde	$t5,$t5,$t2
	; transfer bp[i] to FPU as 4x16-bit values
	extrwi	$t0,$t1,16,16
	extrwi	$t1,$t1,16,0
	extrwi	$t2,$t3,16,16
	extrwi	$t3,$t3,16,0
	std	$t0,`$FRAME+0`($sp)	; yes, std in 32-bit build
	std	$t1,`$FRAME+8`($sp)
	std	$t2,`$FRAME+16`($sp)
	std	$t3,`$FRAME+24`($sp)

	mullw	$t0,$t4,$n0	; mulld tp[0]*n0
	mulhwu	$t1,$t4,$n0
	mullw	$t2,$t5,$n0
	mullw	$t3,$t4,$n1
	add	$t1,$t1,$t2
	add	$t1,$t1,$t3
	; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
	extrwi	$t4,$t0,16,16
	extrwi	$t5,$t0,16,0
	extrwi	$t6,$t1,16,16
	extrwi	$t7,$t1,16,0
	std	$t4,`$FRAME+32`($sp)	; yes, std in 32-bit build
	std	$t5,`$FRAME+40`($sp)
	std	$t6,`$FRAME+48`($sp)
	std	$t7,`$FRAME+56`($sp)
___
$code.=<<___;
	lfd	$A0,8($nap_d)	; load a[j] in double format
	lfd	$A1,16($nap_d)
	lfd	$A2,24($nap_d)	; load a[j+1] in double format
	lfd	$A3,32($nap_d)
	lfd	$N0,40($nap_d)	; load n[j] in double format
	lfd	$N1,48($nap_d)
	lfd	$N2,56($nap_d)	; load n[j+1] in double format
	lfdu	$N3,64($nap_d)

	lfd	$ba,`$FRAME+0`($sp)
	lfd	$bb,`$FRAME+8`($sp)
	lfd	$bc,`$FRAME+16`($sp)
	lfd	$bd,`$FRAME+24`($sp)
	lfd	$na,`$FRAME+32`($sp)
	lfd	$nb,`$FRAME+40`($sp)
	lfd	$nc,`$FRAME+48`($sp)
	lfd	$nd,`$FRAME+56`($sp)

	fcfid	$ba,$ba
	fcfid	$bb,$bb
	fcfid	$bc,$bc
	fcfid	$bd,$bd
	fcfid	$na,$na
	fcfid	$nb,$nb
	fcfid	$nc,$nc
	fcfid	$nd,$nd

	fmul	$T1a,$A1,$ba
	fmul	$T1b,$A1,$bb
	fmul	$T2a,$A2,$ba
	fmul	$T2b,$A2,$bb
	fmul	$T3a,$A3,$ba
	fmul	$T3b,$A3,$bb
	fmul	$T0a,$A0,$ba
	fmul	$T0b,$A0,$bb
	fmadd	$T1a,$A0,$bc,$T1a
	fmadd	$T1b,$A0,$bd,$T1b
	fmadd	$T2a,$A1,$bc,$T2a
	fmadd	$T2b,$A1,$bd,$T2b
	fmadd	$T3a,$A2,$bc,$T3a
	fmadd	$T3b,$A2,$bd,$T3b
	fmul	$dota,$A3,$bc
	fmul	$dotb,$A3,$bd
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	lfd	$A0,8($nap_d)	; load a[j] in double format
	lfd	$A1,16($nap_d)
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	lfd	$A2,24($nap_d)	; load a[j+1] in double format
	lfd	$A3,32($nap_d)
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	fctid	$T0a,$T0a
	fctid	$T0b,$T0b
	fctid	$T1a,$T1a
	fctid	$T1b,$T1b
	fctid	$T2a,$T2a
	fctid	$T2b,$T2b
	fctid	$T3a,$T3a
	fctid	$T3b,$T3b
	stfd	$T0a,`$FRAME+0`($sp)
	stfd	$T0b,`$FRAME+8`($sp)
	stfd	$T1a,`$FRAME+16`($sp)
	stfd	$T1b,`$FRAME+24`($sp)
	stfd	$T2a,`$FRAME+32`($sp)
	stfd	$T2b,`$FRAME+40`($sp)
	stfd	$T3a,`$FRAME+48`($sp)
	stfd	$T3b,`$FRAME+56`($sp)

.align	5
Linner:
	fmul	$T1a,$A1,$ba
	fmul	$T1b,$A1,$bb
	fmul	$T2a,$A2,$ba
	fmul	$T2b,$A2,$bb
	lfd	$N0,40($nap_d)	; load n[j] in double format
	lfd	$N1,48($nap_d)
	fmul	$T3a,$A3,$ba
	fmul	$T3b,$A3,$bb
	fmadd	$T0a,$A0,$ba,$dota
	fmadd	$T0b,$A0,$bb,$dotb
	lfd	$N2,56($nap_d)	; load n[j+1] in double format
	lfdu	$N3,64($nap_d)
	fmadd	$T1a,$A0,$bc,$T1a
	fmadd	$T1b,$A0,$bd,$T1b
	fmadd	$T2a,$A1,$bc,$T2a
	fmadd	$T2b,$A1,$bd,$T2b
	lfd	$A0,8($nap_d)	; load a[j] in double format
	lfd	$A1,16($nap_d)
	fmadd	$T3a,$A2,$bc,$T3a
	fmadd	$T3b,$A2,$bd,$T3b
	fmul	$dota,$A3,$bc
	fmul	$dotb,$A3,$bd
	lfd	$A2,24($nap_d)	; load a[j+1] in double format
	lfd	$A3,32($nap_d)
___
if ($SIZE_T==8 or $flavour =~ /osx/) {
$code.=<<___;
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	ld	$t0,`$FRAME+0`($sp)
	ld	$t1,`$FRAME+8`($sp)
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	ld	$t2,`$FRAME+16`($sp)
	ld	$t3,`$FRAME+24`($sp)
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	add	$t0,$t0,$carry	; can not overflow
	ld	$t4,`$FRAME+32`($sp)
	ld	$t5,`$FRAME+40`($sp)
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	srdi	$carry,$t0,16
	add	$t1,$t1,$carry
	srdi	$carry,$t1,16
	ld	$t6,`$FRAME+48`($sp)
	ld	$t7,`$FRAME+56`($sp)
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	insrdi	$t0,$t1,16,32
	ld	$t1,8($tp)	; tp[j]
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	add	$t2,$t2,$carry
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	srdi	$carry,$t2,16
	insrdi	$t0,$t2,16,16
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	add	$t3,$t3,$carry
	ldu	$t2,16($tp)	; tp[j+1]
	srdi	$carry,$t3,16
	insrdi	$t0,$t3,16,0	; 0..63 bits
	add	$t4,$t4,$carry
	fctid	$T0a,$T0a
	fctid	$T0b,$T0b
	srdi	$carry,$t4,16
	fctid	$T1a,$T1a
	fctid	$T1b,$T1b
	add	$t5,$t5,$carry
	fctid	$T2a,$T2a
	fctid	$T2b,$T2b
	srdi	$carry,$t5,16
	insrdi	$t4,$t5,16,32
	fctid	$T3a,$T3a
	fctid	$T3b,$T3b
	add	$t6,$t6,$carry
	srdi	$carry,$t6,16
	insrdi	$t4,$t6,16,16
	stfd	$T0a,`$FRAME+0`($sp)
	stfd	$T0b,`$FRAME+8`($sp)
	add	$t7,$t7,$carry
	addc	$t3,$t0,$t1
___
$code.=<<___ if ($SIZE_T==4);	# adjust XER[CA]
	extrdi	$t0,$t0,32,0
	extrdi	$t1,$t1,32,0
	adde	$t0,$t0,$t1
___
$code.=<<___;
	stfd	$T1a,`$FRAME+16`($sp)
	stfd	$T1b,`$FRAME+24`($sp)
	insrdi	$t4,$t7,16,0	; 64..127 bits
	srdi	$carry,$t7,16	; upper 33 bits
	stfd	$T2a,`$FRAME+32`($sp)
	stfd	$T2b,`$FRAME+40`($sp)
	adde	$t5,$t4,$t2
___
$code.=<<___ if ($SIZE_T==4);	# adjust XER[CA]
	extrdi	$t4,$t4,32,0
	extrdi	$t2,$t2,32,0
	adde	$t4,$t4,$t2
___
$code.=<<___;
	stfd	$T3a,`$FRAME+48`($sp)
	stfd	$T3b,`$FRAME+56`($sp)
	addze	$carry,$carry
	std	$t3,-16($tp)	; tp[j-1]
	std	$t5,-8($tp)	; tp[j]
___
} else {
$code.=<<___;
	fmadd	$T1a,$N1,$na,$T1a
	fmadd	$T1b,$N1,$nb,$T1b
	lwz	$t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
	lwz	$t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
	fmadd	$T2a,$N2,$na,$T2a
	fmadd	$T2b,$N2,$nb,$T2b
	lwz	$t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
	lwz	$t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
	fmadd	$T3a,$N3,$na,$T3a
	fmadd	$T3b,$N3,$nb,$T3b
	lwz	$t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	fmadd	$T0a,$N0,$na,$T0a
	fmadd	$T0b,$N0,$nb,$T0b
	lwz	$t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
	srwi	$c1,$t1,16
	insrwi	$carry,$t1,16,0
	fmadd	$T1a,$N0,$nc,$T1a
	fmadd	$T1b,$N0,$nd,$T1b
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	fmadd	$T2a,$N1,$nc,$T2a
	fmadd	$T2b,$N1,$nd,$T2b
	insrwi	$t0,$t2,16,0	; 0..31 bits
	srwi	$c1,$t3,16
	insrwi	$carry,$t3,16,0
	fmadd	$T3a,$N2,$nc,$T3a
	fmadd	$T3b,$N2,$nd,$T3b
	lwz	$t2,12($tp)	; tp[j]
	lwz	$t3,8($tp)
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	fmadd	$dota,$N3,$nc,$dota
	fmadd	$dotb,$N3,$nd,$dotb
	srwi	$c1,$t5,16
	insrwi	$carry,$t5,16,0
	fctid	$T0a,$T0a
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	fctid	$T0b,$T0b
	insrwi	$t4,$t6,16,0	; 32..63 bits
	srwi	$c1,$t7,16
	insrwi	$carry,$t7,16,0
	fctid	$T1a,$T1a
	addc	$t0,$t0,$t2
	adde	$t4,$t4,$t3
	lwz	$t3,`$FRAME+32^$LITTLE_ENDIAN`($sp)	; permuted $t1
	lwz	$t2,`$FRAME+36^$LITTLE_ENDIAN`($sp)	; permuted $t0
	fctid	$T1b,$T1b
	addze	$carry,$carry
	addze	$c1,$c1
	stw	$t0,4($tp)	; tp[j-1]
	stw	$t4,0($tp)
	fctid	$T2a,$T2a
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	lwz	$t7,`$FRAME+40^$LITTLE_ENDIAN`($sp)	; permuted $t3
	lwz	$t6,`$FRAME+44^$LITTLE_ENDIAN`($sp)	; permuted $t2
	fctid	$T2b,$T2b
	srwi	$c1,$t3,16
	insrwi	$carry,$t3,16,0
	lwz	$t1,`$FRAME+48^$LITTLE_ENDIAN`($sp)	; permuted $t5
	lwz	$t0,`$FRAME+52^$LITTLE_ENDIAN`($sp)	; permuted $t4
	fctid	$T3a,$T3a
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	lwz	$t5,`$FRAME+56^$LITTLE_ENDIAN`($sp)	; permuted $t7
	lwz	$t4,`$FRAME+60^$LITTLE_ENDIAN`($sp)	; permuted $t6
	fctid	$T3b,$T3b
	insrwi	$t2,$t6,16,0	; 64..95 bits
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16
	lwz	$t6,20($tp)
	lwzu	$t7,16($tp)
	addc	$t0,$t0,$carry
	stfd	$T0a,`$FRAME+0`($sp)
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	stfd	$T0b,`$FRAME+8`($sp)
	insrwi	$carry,$t1,16,0
	srwi	$c1,$t1,16
	addc	$t4,$t4,$carry
	stfd	$T1a,`$FRAME+16`($sp)
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	insrwi	$t0,$t4,16,0	; 96..127 bits
	stfd	$T1b,`$FRAME+24`($sp)
	insrwi	$carry,$t5,16,0
	srwi	$c1,$t5,16
	addc	$t2,$t2,$t6
	stfd	$T2a,`$FRAME+32`($sp)
	adde	$t0,$t0,$t7
	stfd	$T2b,`$FRAME+40`($sp)
	addze	$carry,$carry
	stfd	$T3a,`$FRAME+48`($sp)
	addze	$c1,$c1
	stfd	$T3b,`$FRAME+56`($sp)
	stw	$t2,-4($tp)	; tp[j]
	stw	$t0,-8($tp)
___
}
$code.=<<___;
	bdnz-	Linner

	fctid	$dota,$dota
	fctid	$dotb,$dotb
___
if ($SIZE_T==8 or $flavour =~ /osx/) {
$code.=<<___;
	ld	$t0,`$FRAME+0`($sp)
	ld	$t1,`$FRAME+8`($sp)
	ld	$t2,`$FRAME+16`($sp)
	ld	$t3,`$FRAME+24`($sp)
	ld	$t4,`$FRAME+32`($sp)
	ld	$t5,`$FRAME+40`($sp)
	ld	$t6,`$FRAME+48`($sp)
	ld	$t7,`$FRAME+56`($sp)
	stfd	$dota,`$FRAME+64`($sp)
	stfd	$dotb,`$FRAME+72`($sp)

	add	$t0,$t0,$carry	; can not overflow
	srdi	$carry,$t0,16
	add	$t1,$t1,$carry
	srdi	$carry,$t1,16
	insrdi	$t0,$t1,16,32
	add	$t2,$t2,$carry
	ld	$t1,8($tp)	; tp[j]
	srdi	$carry,$t2,16
	insrdi	$t0,$t2,16,16
	add	$t3,$t3,$carry
	ldu	$t2,16($tp)	; tp[j+1]
	srdi	$carry,$t3,16
	insrdi	$t0,$t3,16,0	; 0..63 bits
	add	$t4,$t4,$carry
	srdi	$carry,$t4,16
	add	$t5,$t5,$carry
	srdi	$carry,$t5,16
	insrdi	$t4,$t5,16,32
	add	$t6,$t6,$carry
	srdi	$carry,$t6,16
	insrdi	$t4,$t6,16,16
	add	$t7,$t7,$carry
	insrdi	$t4,$t7,16,0	; 64..127 bits
	srdi	$carry,$t7,16	; upper 33 bits
	ld	$t6,`$FRAME+64`($sp)
	ld	$t7,`$FRAME+72`($sp)

	addc	$t3,$t0,$t1
___
$code.=<<___ if ($SIZE_T==4);	# adjust XER[CA]
	extrdi	$t0,$t0,32,0
	extrdi	$t1,$t1,32,0
	adde	$t0,$t0,$t1
___
$code.=<<___;
	adde	$t5,$t4,$t2
___
$code.=<<___ if ($SIZE_T==4);	# adjust XER[CA]
	extrdi	$t4,$t4,32,0
	extrdi	$t2,$t2,32,0
	adde	$t4,$t4,$t2
___
$code.=<<___;
	addze	$carry,$carry
	std	$t3,-16($tp)	; tp[j-1]
	std	$t5,-8($tp)	; tp[j]

	add	$carry,$carry,$ovf	; consume uppermost overflow
	add	$t6,$t6,$carry	; can not overflow
	srdi	$carry,$t6,16
	add	$t7,$t7,$carry
	insrdi	$t6,$t7,48,0
	srdi	$ovf,$t7,48
	std	$t6,0($tp)	; tp[num-1]
___
} else {
$code.=<<___;
	lwz	$t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
	lwz	$t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
	lwz	$t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
	lwz	$t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
	lwz	$t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
	lwz	$t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
	stfd	$dota,`$FRAME+64`($sp)
	stfd	$dotb,`$FRAME+72`($sp)

	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	insrwi	$carry,$t1,16,0
	srwi	$c1,$t1,16
	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	insrwi	$t0,$t2,16,0	; 0..31 bits
	lwz	$t2,12($tp)	; tp[j]
	insrwi	$carry,$t3,16,0
	srwi	$c1,$t3,16
	lwz	$t3,8($tp)
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	insrwi	$carry,$t5,16,0
	srwi	$c1,$t5,16
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	insrwi	$t4,$t6,16,0	; 32..63 bits
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16

	addc	$t0,$t0,$t2
	adde	$t4,$t4,$t3
	addze	$carry,$carry
	addze	$c1,$c1
	stw	$t0,4($tp)	; tp[j-1]
	stw	$t4,0($tp)

	lwz	$t3,`$FRAME+32^$LITTLE_ENDIAN`($sp)	; permuted $t1
	lwz	$t2,`$FRAME+36^$LITTLE_ENDIAN`($sp)	; permuted $t0
	lwz	$t7,`$FRAME+40^$LITTLE_ENDIAN`($sp)	; permuted $t3
	lwz	$t6,`$FRAME+44^$LITTLE_ENDIAN`($sp)	; permuted $t2
	lwz	$t1,`$FRAME+48^$LITTLE_ENDIAN`($sp)	; permuted $t5
	lwz	$t0,`$FRAME+52^$LITTLE_ENDIAN`($sp)	; permuted $t4
	lwz	$t5,`$FRAME+56^$LITTLE_ENDIAN`($sp)	; permuted $t7
	lwz	$t4,`$FRAME+60^$LITTLE_ENDIAN`($sp)	; permuted $t6

	addc	$t2,$t2,$carry
	adde	$t3,$t3,$c1
	srwi	$carry,$t2,16
	insrwi	$carry,$t3,16,0
	srwi	$c1,$t3,16
	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	srwi	$carry,$t6,16
	insrwi	$t2,$t6,16,0	; 64..95 bits
	lwz	$t6,20($tp)
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16
	lwzu	$t7,16($tp)
	addc	$t0,$t0,$carry
	adde	$t1,$t1,$c1
	srwi	$carry,$t0,16
	insrwi	$carry,$t1,16,0
	srwi	$c1,$t1,16
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1
	srwi	$carry,$t4,16
	insrwi	$t0,$t4,16,0	; 96..127 bits
	insrwi	$carry,$t5,16,0
	srwi	$c1,$t5,16

	addc	$t2,$t2,$t6
	adde	$t0,$t0,$t7
	lwz	$t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
	lwz	$t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
	addze	$carry,$carry
	addze	$c1,$c1
	lwz	$t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
	lwz	$t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)

	addc	$t6,$t6,$carry
	adde	$t7,$t7,$c1
	stw	$t2,-4($tp)	; tp[j]
	stw	$t0,-8($tp)
	addc	$t6,$t6,$ovf
	addze	$t7,$t7
	srwi	$carry,$t6,16
	insrwi	$carry,$t7,16,0
	srwi	$c1,$t7,16
	addc	$t4,$t4,$carry
	adde	$t5,$t5,$c1

	insrwi	$t6,$t4,16,0
	srwi	$t4,$t4,16
	insrwi	$t4,$t5,16,0
	srwi	$ovf,$t5,16

	stw	$t6,4($tp)	; tp[num-1]
	stw	$t4,0($tp)
___
}
$code.=<<___;
	slwi	$t7,$num,2
	addi	$i,$i,8
	subf	$nap_d,$t7,$nap_d	; rewind pointer
	cmpw	$i,$num
	blt-	Louter
___
$code.=<<___ if ($SIZE_T==8);
	subf	$np,$num,$np	; rewind np
	addi	$j,$j,1		; restore counter
	subfc	$i,$i,$i	; j=0 and "clear" XER[CA]
	addi	$tp,$sp,`$FRAME+$TRANSFER+8`
	addi	$t4,$sp,`$FRAME+$TRANSFER+16`
	addi	$t5,$np,8
	addi	$t6,$rp,8
	mtctr	$j

.align	4
Lsub:	ldx	$t0,$tp,$i
	ldx	$t1,$np,$i
	ldx	$t2,$t4,$i
	ldx	$t3,$t5,$i
	subfe	$t0,$t1,$t0	; tp[j]-np[j]
	subfe	$t2,$t3,$t2	; tp[j+1]-np[j+1]
	stdx	$t0,$rp,$i
	stdx	$t2,$t6,$i
	addi	$i,$i,16
	bdnz-	Lsub

	li	$i,0
	subfe	$ovf,$i,$ovf	; handle uppermost overflow bit
	and	$ap,$tp,$ovf
	andc	$np,$rp,$ovf
	or	$ap,$ap,$np	; ap=borrow?tp:rp
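	; branch-free select: subfe folded the leftover overflow word and
	; the final borrow into $ovf as a 0 or all-ones mask, so the copy
	; source is tp when the subtraction underflowed and rp (holding
	; tp-np) otherwise, without a data-dependent branch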
	addi	$t7,$ap,8
	mtctr	$j

.align	4
Lcopy:				; copy or in-place refresh
	ldx	$t0,$ap,$i
	ldx	$t1,$t7,$i
	std	$i,8($nap_d)	; zap nap_d
	std	$i,16($nap_d)
	std	$i,24($nap_d)
	std	$i,32($nap_d)
	std	$i,40($nap_d)
	std	$i,48($nap_d)
	std	$i,56($nap_d)
	stdu	$i,64($nap_d)
	stdx	$t0,$rp,$i
	stdx	$t1,$t6,$i
	stdx	$i,$tp,$i	; zap tp at once
	stdx	$i,$t4,$i
	addi	$i,$i,16
	bdnz-	Lcopy
___
$code.=<<___ if ($SIZE_T==4);
	subf	$np,$num,$np	; rewind np
	addi	$j,$j,1		; restore counter
	subfc	$i,$i,$i	; j=0 and "clear" XER[CA]
	addi	$tp,$sp,`$FRAME+$TRANSFER`
	addi	$np,$np,-4
	addi	$rp,$rp,-4
	addi	$ap,$sp,`$FRAME+$TRANSFER+4`
	mtctr	$j

.align	4
Lsub:	lwz	$t0,12($tp)	; load tp[j..j+3] in 64-bit word order
	lwz	$t1,8($tp)
	lwz	$t2,20($tp)
	lwzu	$t3,16($tp)
	lwz	$t4,4($np)	; load np[j..j+3] in 32-bit word order
	lwz	$t5,8($np)
	lwz	$t6,12($np)
	lwzu	$t7,16($np)
	subfe	$t4,$t4,$t0	; tp[j]-np[j]
	stw	$t0,4($ap)	; save tp[j..j+3] in 32-bit word order
	subfe	$t5,$t5,$t1	; tp[j+1]-np[j+1]
	stw	$t1,8($ap)
	subfe	$t6,$t6,$t2	; tp[j+2]-np[j+2]
	stw	$t2,12($ap)
	subfe	$t7,$t7,$t3	; tp[j+3]-np[j+3]
	stwu	$t3,16($ap)
	stw	$t4,4($rp)
	stw	$t5,8($rp)
	stw	$t6,12($rp)
	stwu	$t7,16($rp)
	bdnz-	Lsub

	li	$i,0
	subfe	$ovf,$i,$ovf	; handle uppermost overflow bit
	addi	$tp,$sp,`$FRAME+$TRANSFER+4`
	subf	$rp,$num,$rp	; rewind rp
	and	$ap,$tp,$ovf
	andc	$np,$rp,$ovf
	or	$ap,$ap,$np	; ap=borrow?tp:rp
	addi	$tp,$sp,`$FRAME+$TRANSFER`
	mtctr	$j

.align	4
Lcopy:				; copy or in-place refresh
	lwz	$t0,4($ap)
	lwz	$t1,8($ap)
	lwz	$t2,12($ap)
	lwzu	$t3,16($ap)
	std	$i,8($nap_d)	; zap nap_d
	std	$i,16($nap_d)
	std	$i,24($nap_d)
	std	$i,32($nap_d)
	std	$i,40($nap_d)
	std	$i,48($nap_d)
	std	$i,56($nap_d)
	stdu	$i,64($nap_d)
	stw	$t0,4($rp)
	stw	$t1,8($rp)
	stw	$t2,12($rp)
	stwu	$t3,16($rp)
	std	$i,8($tp)	; zap tp at once
	stdu	$i,16($tp)
	bdnz-	Lcopy
___
$code.=<<___;
	$POP	$i,0($sp)
	li	r3,1	; signal "handled"
	$POP	r19,`-12*8-13*$SIZE_T`($i)
	$POP	r20,`-12*8-12*$SIZE_T`($i)
	$POP	r21,`-12*8-11*$SIZE_T`($i)
	$POP	r22,`-12*8-10*$SIZE_T`($i)
	$POP	r23,`-12*8-9*$SIZE_T`($i)
	$POP	r24,`-12*8-8*$SIZE_T`($i)
	$POP	r25,`-12*8-7*$SIZE_T`($i)
	$POP	r26,`-12*8-6*$SIZE_T`($i)
	$POP	r27,`-12*8-5*$SIZE_T`($i)
	$POP	r28,`-12*8-4*$SIZE_T`($i)
	$POP	r29,`-12*8-3*$SIZE_T`($i)
	$POP	r30,`-12*8-2*$SIZE_T`($i)
	$POP	r31,`-12*8-1*$SIZE_T`($i)
	lfd	f20,`-12*8`($i)
	lfd	f21,`-11*8`($i)
	lfd	f22,`-10*8`($i)
	lfd	f23,`-9*8`($i)
	lfd	f24,`-8*8`($i)
	lfd	f25,`-7*8`($i)
	lfd	f26,`-6*8`($i)
	lfd	f27,`-5*8`($i)
	lfd	f28,`-4*8`($i)
	lfd	f29,`-3*8`($i)
	lfd	f30,`-2*8`($i)
	lfd	f31,`-1*8`($i)
	mr	$sp,$i
	blr
	.long	0
	.byte	0,12,4,0,0x8c,13,6,0
	.long	0
.size	.$fname,.-.$fname

.asciz	"Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;