#! /usr/bin/env perl
# Copyright 2012-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by David S. Miller and Andy Polyakov
# The module is licensed under 2-clause BSD license.
# November 2012. All rights reserved.
# ====================================================================
######################################################################
# Montgomery squaring-n-multiplication module for SPARC T4.
#
# The module consists of three parts:
#
# 1) a collection of "single-op" subroutines that perform a single
#    operation, Montgomery squaring or multiplication, on 512-,
#    1024-, 1536- and 2048-bit operands;
# 2) a collection of "multi-op" subroutines that perform 5 squaring
#    operations and 1 multiplication on operands of the above lengths;
# 3) fall-back and helper VIS3 subroutines.
#
# RSA sign is dominated by the multi-op subroutines, while RSA verify
# and DSA are dominated by the single-op ones. A special note about
# the 4096-bit RSA verify result: the operands are too long for the
# dedicated hardware, so they are handled by the VIS3 code, which is
# why you don't see any improvement there. It could surely be improved
# [by deploying the 'mpmul' instruction], maybe in the future...
# Performance improvement.
#
# 64-bit process, VIS3:
#                    sign       verify     sign/s  verify/s
# rsa 1024 bits      0.000628s  0.000028s  1592.4   35434.4
# rsa 2048 bits      0.003282s  0.000106s   304.7    9438.3
# rsa 4096 bits      0.025866s  0.000340s    38.7    2940.9
# dsa 1024 bits      0.000301s  0.000332s  3323.7    3013.9
# dsa 2048 bits      0.001056s  0.001233s   946.9     810.8
#
# 64-bit process, this module:
#                    sign       verify     sign/s  verify/s
# rsa 1024 bits      0.000256s  0.000016s  3904.4   61411.9
# rsa 2048 bits      0.000946s  0.000029s  1056.8   34292.7
# rsa 4096 bits      0.005061s  0.000340s   197.6    2940.5
# dsa 1024 bits      0.000176s  0.000195s  5674.7    5130.5
# dsa 2048 bits      0.000296s  0.000354s  3383.2    2827.6
#
######################################################################
# 32-bit process, VIS3:
#                    sign       verify     sign/s  verify/s
# rsa 1024 bits      0.000665s  0.000028s  1504.8   35233.3
# rsa 2048 bits      0.003349s  0.000106s   298.6    9433.4
# rsa 4096 bits      0.025959s  0.000341s    38.5    2934.8
# dsa 1024 bits      0.000320s  0.000341s  3123.3    2929.6
# dsa 2048 bits      0.001101s  0.001260s   908.2     793.4
#
# 32-bit process, this module:
#                    sign       verify     sign/s  verify/s
# rsa 1024 bits      0.000301s  0.000017s  3317.1   60240.0
# rsa 2048 bits      0.001034s  0.000030s   966.9   33812.7
# rsa 4096 bits      0.005244s  0.000341s   190.7    2935.4
# dsa 1024 bits      0.000201s  0.000205s  4976.1    4879.2
# dsa 2048 bits      0.000328s  0.000360s  3051.1    2774.2
#
# 32-bit code is prone to performance degradation as the interrupt rate
# on the CPU executing it grows. This is because, in the standard way of
# handling an interrupt in a 32-bit process context, the upper halves of
# most integer registers used as input or output are zeroed. This
# renders the result invalid, and the operation has to be re-run. If the
# CPU is "bothered" with timer interrupts only, the penalty is hardly
# measurable. But in order to mitigate this problem for higher interrupt
# rates, the contemporary Linux kernel recognizes the biased stack even
# in 32-bit process context and preserves full register contents. See
# http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=517ffce4e1a03aea979fe3a18a3dd1761a24fafb
# for details.
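#
# The clobbering is detected with the "sentinel" set up in each entry
# point below: in 32-bit builds an all-ones pattern is planted in the
# upper half of %fp, and after every register window restore the code
# ANDs %fp back into the sentinel. If an interrupt zeroed the upper
# halves, the sentinel collapses to zero and the subroutine returns 0
# so that the caller can re-run the operation. In 64-bit builds the
# sentinel is 0 and the checks are compiled away.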
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "sparcv9_modes.pl";
$output = pop;
open STDOUT,">$output" or die "can't open $output: $!";
$code.=<<___;
#include "sparc_arch.h"
#ifdef __arch64__
.register %g2,#scratch
.register %g3,#scratch
#endif
.section ".text",#alloc,#execinstr
#ifdef __PIC__
SPARC_PIC_THUNK(%g1)
#endif
___
########################################################################
# Register layout for mont[mul|sqr] instructions.
# For details see "Oracle SPARC Architecture 2011" manual at
# http://www.oracle.com/technetwork/server-storage/sun-sparc-enterprise/documentation/.
#
my @R=map("%f".2*$_,(0..11,30,31,12..29));
my @N=(map("%l$_",(0..7)),map("%o$_",(0..5))); @N=(@N,@N,@N[0..3]);
my @A=(@N[0..13],@R[14..31]);
my @B=(map("%i$_",(0..5)),map("%l$_",(0..7))); @B=(@B,@B,map("%o$_",(0..3)));
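#
# Loosely speaking, @A names the registers holding the multiplicand,
# @B the multiplier and @N the modulus, as expected by montmul/montsqr,
# while @R is the floating-point-register view used when storing the
# result out. The arrays wrap around because limbs beyond the first
# batch live in additional register windows, hence the extra "save"
# before each load batch below.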
########################################################################
# int bn_mul_mont_t4_$NUM(u64 *rp,const u64 *ap,const u64 *bp,
#                         const u64 *np,const BN_ULONG *n0);
#
sub generate_bn_mul_mont_t4() {
my $NUM=shift;
my ($rp,$ap,$bp,$np,$sentinel)=map("%g$_",(1..5));
$code.=<<___;
.globl bn_mul_mont_t4_$NUM
.align 32
bn_mul_mont_t4_$NUM:
#ifdef __arch64__
mov 0,$sentinel
mov -128,%g4
#elif defined(SPARCV9_64BIT_STACK)
SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
ld [%g1+0],%g1 ! OPENSSL_sparcv9cap_P[0]
mov -2047,%g4
and %g1,SPARCV9_64BIT_STACK,%g1
movrz %g1,0,%g4
mov -1,$sentinel
add %g4,-128,%g4
#else
mov -1,$sentinel
mov -128,%g4
#endif
sllx $sentinel,32,$sentinel
save %sp,%g4,%sp
#ifndef __arch64__
save %sp,-128,%sp ! warm it up
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
restore
restore
restore
restore
restore
restore
#endif
and %sp,1,%g4
or $sentinel,%fp,%fp
or %g4,$sentinel,$sentinel
! copy arguments to global registers
mov %i0,$rp
mov %i1,$ap
mov %i2,$bp
mov %i3,$np
ld [%i4+0],%f1 ! load *n0
ld [%i4+4],%f0
fsrc2 %f0,%f60
___
# load ap[$NUM] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for($i=0; $i<14 && $i<$NUM; $i++) {
my $lo=$i<13?@A[$i+1]:"%o7";
$code.=<<___;
ld [$ap+$i*8+0],$lo
ld [$ap+$i*8+4],@A[$i]
sllx @A[$i],32,@A[$i]
or $lo,@A[$i],@A[$i]
___
}
for(; $i<$NUM; $i++) {
my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
$code.=<<___;
ld [$ap+$i*8+0],$lo
ld [$ap+$i*8+4],$hi
fsrc2 $hi,@A[$i]
___
}
# load np[$NUM] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for($i=0; $i<14 && $i<$NUM; $i++) {
my $lo=$i<13?@N[$i+1]:"%o7";
$code.=<<___;
ld [$np+$i*8+0],$lo
ld [$np+$i*8+4],@N[$i]
sllx @N[$i],32,@N[$i]
or $lo,@N[$i],@N[$i]
___
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<28 && $i<$NUM; $i++) {
my $lo=$i<27?@N[$i+1]:"%o7";
$code.=<<___;
ld [$np+$i*8+0],$lo
ld [$np+$i*8+4],@N[$i]
sllx @N[$i],32,@N[$i]
or $lo,@N[$i],@N[$i]
___
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<$NUM; $i++) {
my $lo=($i<$NUM-1)?@N[$i+1]:"%o7";
$code.=<<___;
ld [$np+$i*8+0],$lo
ld [$np+$i*8+4],@N[$i]
sllx @N[$i],32,@N[$i]
or $lo,@N[$i],@N[$i]
___
}
$code.=<<___;
cmp $ap,$bp
be SIZE_T_CC,.Lmsquare_$NUM
nop
___
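# If ap and bp point at the same vector the caller is squaring, so the
# code above branches to the dedicated montsqr path instead of loading
# bp[] separately.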
# load bp[$NUM] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for($i=0; $i<14 && $i<$NUM; $i++) {
my $lo=$i<13?@B[$i+1]:"%o7";
$code.=<<___;
ld [$bp+$i*8+0],$lo
ld [$bp+$i*8+4],@B[$i]
sllx @B[$i],32,@B[$i]
or $lo,@B[$i],@B[$i]
___
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<$NUM; $i++) {
my $lo=($i<$NUM-1)?@B[$i+1]:"%o7";
$code.=<<___;
ld [$bp+$i*8+0],$lo
ld [$bp+$i*8+4],@B[$i]
sllx @B[$i],32,@B[$i]
or $lo,@B[$i],@B[$i]
___
}
# magic ################################################################
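# The montmul/montsqr instructions are emitted as raw .word values,
# presumably so that the module assembles even with tools that do not
# know the T4 opcodes; the $NUM-1 added to the base opcode encodes the
# operand length in 64-bit words.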
$code.=<<___;
.word 0x81b02920+$NUM-1 ! montmul $NUM-1
.Lmresume_$NUM:
fbu,pn %fcc3,.Lmabort_$NUM
#ifndef __arch64__
and %fp,$sentinel,$sentinel
brz,pn $sentinel,.Lmabort_$NUM
#endif
nop
#ifdef __arch64__
restore
restore
restore
restore
restore
#else
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
brz,pn $sentinel,.Lmabort1_$NUM
restore
#endif
___
# save tp[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {
$code.=<<___;
movxtod @A[$i],@R[$i]
___
}
$code.=<<___;
#ifdef __arch64__
restore
#else
and %fp,$sentinel,$sentinel
restore
and $sentinel,1,%o7
and %fp,$sentinel,$sentinel
srl %fp,0,%fp ! just in case?
or %o7,$sentinel,$sentinel
brz,a,pn $sentinel,.Lmdone_$NUM
mov 0,%i0 ! return failure
#endif
___
for($i=0; $i<12 && $i<$NUM; $i++) {
@R[$i] =~ /%f([0-9]+)/;
my $lo = "%f".($1+1);
$code.=<<___;
st $lo,[$rp+$i*8+0]
st @R[$i],[$rp+$i*8+4]
___
}
for(; $i<$NUM; $i++) {
my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
$code.=<<___;
fsrc2 @R[$i],$hi
st $lo,[$rp+$i*8+0]
st $hi,[$rp+$i*8+4]
___
}
$code.=<<___;
mov 1,%i0 ! return success
.Lmdone_$NUM:
ret
restore
.Lmabort_$NUM:
restore
restore
restore
restore
restore
.Lmabort1_$NUM:
restore
mov 0,%i0 ! return failure
ret
restore
.align 32
.Lmsquare_$NUM:
save %sp,-128,%sp; or $sentinel,%fp,%fp
save %sp,-128,%sp; or $sentinel,%fp,%fp
.word 0x81b02940+$NUM-1 ! montsqr $NUM-1
ba .Lmresume_$NUM
nop
.type bn_mul_mont_t4_$NUM, #function
.size bn_mul_mont_t4_$NUM, .-bn_mul_mont_t4_$NUM
___
}
for ($i=8;$i<=32;$i+=8) {
    &generate_bn_mul_mont_t4($i);
}
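# This instantiates bn_mul_mont_t4_{8,16,24,32}, i.e. dedicated code
# for 512-, 1024-, 1536- and 2048-bit operands ($NUM counts 64-bit
# limbs).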
########################################################################
#
sub load_ccr {
my ($ptbl,$pwr,$ccr,$skip_wr)=@_;
$code.=<<___;
srl $pwr, 2, %o4
and $pwr, 3, %o5
and %o4, 7, %o4
sll %o5, 3, %o5 ! offset within first cache line
add %o5, $ptbl, $ptbl ! of the pwrtbl
or %g0, 1, %o5
sll %o5, %o4, $ccr
___
$code.=<<___ if (!$skip_wr);
wr $ccr, %g0, %ccr
___
}
sub load_b_pair {
my ($pwrtbl,$B0,$B1)=@_;
$code.=<<___;
ldx [$pwrtbl+0*32], $B0
ldx [$pwrtbl+8*32], $B1
ldx [$pwrtbl+1*32], %o4
ldx [$pwrtbl+9*32], %o5
movvs %icc, %o4, $B0
ldx [$pwrtbl+2*32], %o4
movvs %icc, %o5, $B1
ldx [$pwrtbl+10*32],%o5
move %icc, %o4, $B0
ldx [$pwrtbl+3*32], %o4
move %icc, %o5, $B1
ldx [$pwrtbl+11*32],%o5
movneg %icc, %o4, $B0
ldx [$pwrtbl+4*32], %o4
movneg %icc, %o5, $B1
ldx [$pwrtbl+12*32],%o5
movcs %xcc, %o4, $B0
ldx [$pwrtbl+5*32],%o4
movcs %xcc, %o5, $B1
ldx [$pwrtbl+13*32],%o5
movvs %xcc, %o4, $B0
ldx [$pwrtbl+6*32], %o4
movvs %xcc, %o5, $B1
ldx [$pwrtbl+14*32],%o5
move %xcc, %o4, $B0
ldx [$pwrtbl+7*32], %o4
move %xcc, %o5, $B1
ldx [$pwrtbl+15*32],%o5
movneg %xcc, %o4, $B0
add $pwrtbl,16*32, $pwrtbl
movneg %xcc, %o5, $B1
___
}
sub load_b {
my ($pwrtbl,$Bi)=@_;
$code.=<<___;
ldx [$pwrtbl+0*32], $Bi
ldx [$pwrtbl+1*32], %o4
ldx [$pwrtbl+2*32], %o5
movvs %icc, %o4, $Bi
ldx [$pwrtbl+3*32], %o4
move %icc, %o5, $Bi
ldx [$pwrtbl+4*32], %o5
movneg %icc, %o4, $Bi
ldx [$pwrtbl+5*32], %o4
movcs %xcc, %o5, $Bi
ldx [$pwrtbl+6*32], %o5
movvs %xcc, %o4, $Bi
ldx [$pwrtbl+7*32], %o4
move %xcc, %o5, $Bi
add $pwrtbl,8*32, $pwrtbl
movneg %xcc, %o4, $Bi
___
}
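# The gather above is meant to be cache-timing safe: load_ccr turns the
# secret power index into a fixed sub-offset plus a single bit written
# into %ccr, and load_b/load_b_pair then read every candidate entry at
# fixed offsets, keeping only the wanted one via conditional moves keyed
# off the %icc/%xcc flag bits. The sequence of addresses touched is thus
# independent of the index.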
########################################################################
# int bn_pwr5_mont_t4_$NUM(u64 *tp,const u64 *np,const BN_ULONG *n0,
#                          const u64 *pwrtbl,int pwr,int stride);
#
sub generate_bn_pwr5_mont_t4() {
my $NUM=shift;
my ($tp,$np,$pwrtbl,$pwr,$sentinel)=map("%g$_",(1..5));
$code.=<<___;
.globl bn_pwr5_mont_t4_$NUM
.align 32
bn_pwr5_mont_t4_$NUM:
#ifdef __arch64__
mov 0,$sentinel
mov -128,%g4
#elif defined(SPARCV9_64BIT_STACK)
SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
ld [%g1+0],%g1 ! OPENSSL_sparcv9cap_P[0]
mov -2047,%g4
and %g1,SPARCV9_64BIT_STACK,%g1
movrz %g1,0,%g4
mov -1,$sentinel
add %g4,-128,%g4
#else
mov -1,$sentinel
mov -128,%g4
#endif
sllx $sentinel,32,$sentinel
save %sp,%g4,%sp
#ifndef __arch64__
save %sp,-128,%sp ! warm it up
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
save %sp,-128,%sp
restore
restore
restore
restore
restore
restore
#endif
and %sp,1,%g4
or $sentinel,%fp,%fp
or %g4,$sentinel,$sentinel
! copy arguments to global registers
mov %i0,$tp
mov %i1,$np
ld [%i2+0],%f1 ! load *n0
ld [%i2+4],%f0
mov %i3,$pwrtbl
srl %i4,%g0,%i4 ! pack last arguments
sllx %i5,32,$pwr
or %i4,$pwr,$pwr
fsrc2 %f0,%f60
___
# load tp[$NUM] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for($i=0; $i<14 && $i<$NUM; $i++) {
$code.=<<___;
ldx [$tp+$i*8],@A[$i]
___
}
for(; $i<$NUM; $i++) {
$code.=<<___;
ldd [$tp+$i*8],@A[$i]
___
}
# load np[$NUM] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for($i=0; $i<14 && $i<$NUM; $i++) {
$code.=<<___;
ldx [$np+$i*8],@N[$i]
___
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<28 && $i<$NUM; $i++) {
$code.=<<___;
ldx [$np+$i*8],@N[$i]
___
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<$NUM; $i++) {
$code.=<<___;
ldx [$np+$i*8],@N[$i]
___
}
# load pwrtbl[pwr] ########################################################
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
srlx $pwr, 32, %o4 ! unpack $pwr
srl $pwr, %g0, %o5
sub %o4, 5, %o4
mov $pwrtbl, %o7
sllx %o4, 32, $pwr ! re-pack $pwr
or %o5, $pwr, $pwr
srl %o5, %o4, %o5
___
&load_ccr("%o7","%o5","%o4");
$code.=<<___;
b .Lstride_$NUM
nop
.align 16
.Lstride_$NUM:
___
for($i=0; $i<14 && $i<$NUM; $i+=2) {
&load_b_pair("%o7",@B[$i],@B[$i+1]);
}
$code.=<<___;
save %sp,-128,%sp; or $sentinel,%fp,%fp
___
for(; $i<$NUM; $i+=2) {
&load_b_pair("%i7",@B[$i],@B[$i+1]);
}
$code.=<<___;
srax $pwr, 32, %o4 ! unpack $pwr
srl $pwr, %g0, %o5
sub %o4, 5, %o4
mov $pwrtbl, %i7
sllx %o4, 32, $pwr ! re-pack $pwr
or %o5, $pwr, $pwr
srl %o5, %o4, %o5
___
&load_ccr("%i7","%o5","%o4",1);
# magic ################################################################
for($i=0; $i<5; $i++) {
$code.=<<___;
.word 0x81b02940+$NUM-1 ! montsqr $NUM-1
fbu,pn %fcc3,.Labort_$NUM
#ifndef __arch64__
and %fp,$sentinel,$sentinel
brz,pn $sentinel,.Labort_$NUM
#endif
nop
___
}
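# Five squarings followed by one multiplication by the gathered table
# entry make up one fixed-window (5-bit) exponentiation step; this is
# the "multi-op" operation referred to in the header.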
$code.=<<___;
wr %o4, %g0, %ccr
.word 0x81b02920+$NUM-1 ! montmul $NUM-1
fbu,pn %fcc3,.Labort_$NUM
#ifndef __arch64__
and %fp,$sentinel,$sentinel
brz,pn $sentinel,.Labort_$NUM
#endif
srax $pwr, 32, %o4
#ifdef __arch64__
brgez %o4,.Lstride_$NUM
restore
restore
restore
restore
restore
#else
brgez %o4,.Lstride_$NUM
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
restore; and %fp,$sentinel,$sentinel
brz,pn $sentinel,.Labort1_$NUM
restore
#endif
___
# save tp[$NUM] ########################################################
for($i=0; $i<14 && $i<$NUM; $i++) {
$code.=<<___;
movxtod @A[$i],@R[$i]
___
}
$code.=<<___;
#ifdef __arch64__
restore
#else
and %fp,$sentinel,$sentinel
restore
and $sentinel,1,%o7
and %fp,$sentinel,$sentinel
srl %fp,0,%fp ! just in case?
or %o7,$sentinel,$sentinel
brz,a,pn $sentinel,.Ldone_$NUM
mov 0,%i0 ! return failure
#endif
___
for($i=0; $i<$NUM; $i++) {
$code.=<<___;
std @R[$i],[$tp+$i*8]
___
}
$code.=<<___;
mov 1,%i0 ! return success
.Ldone_$NUM:
ret
restore
.Labort_$NUM:
restore
restore
restore
restore
restore
.Labort1_$NUM:
restore
mov 0,%i0 ! return failure
ret
restore
.type bn_pwr5_mont_t4_$NUM, #function
.size bn_pwr5_mont_t4_$NUM, .-bn_pwr5_mont_t4_$NUM
___
}
for ($i=8;$i<=32;$i+=8) {
    &generate_bn_pwr5_mont_t4($i);
}
{
########################################################################
# Fall-back subroutines
#
# copy of bn_mul_mont_vis3 adjusted for vectors of 64-bit values
#
($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
    (map("%g$_",(1..5)),map("%o$_",(0..5,7)));
# int bn_mul_mont(
$rp="%o0";  # u64 *rp,
$ap="%o1";  # const u64 *ap,
$bp="%o2";  # const u64 *bp,
$np="%o3";  # const u64 *np,
$n0p="%o4"; # const BN_ULONG *n0,
$num="%o5"; # int num);  # caller ensures that num is >=3
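#
# These routines follow the plain VIS3 bn_mul_mont code and handle
# operand lengths that the dedicated hardware paths above do not cover
# (for instance the 4096-bit RSA case mentioned in the header).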
$code.=<<___;
.globl bn_mul_mont_t4
.align 32
bn_mul_mont_t4:
add %sp, STACK_BIAS, %g4 ! real top of stack
sll $num, 3, $num ! size in bytes
add $num, 63, %g1
andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
sub %g4, %g1, %g1
andn %g1, 63, %g1 ! align at 64 byte
sub %g1, STACK_FRAME, %g1 ! new top of stack
sub %g1, %g4, %g1
save %sp, %g1, %sp
___
# +-------------------------------+<----- %sp
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# |       __int64 tmp[0]          |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# .                               .
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);
$code.=<<___;
ld [$n0p+0], $t0 ! pull n0[0..1] value
ld [$n0p+4], $t1
add %sp, STACK_BIAS+STACK_FRAME, $tp
ldx [$bp+0], $m0 ! m0=bp[0]
sllx $t1, 32, $n0
add $bp, 8, $bp
or $t0, $n0, $n0
ldx [$ap+0], $aj ! ap[0]
mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
umulxhi $aj, $m0, $hi0
ldx [$ap+8], $aj ! ap[1]
add $ap, 16, $ap
ldx [$np+0], $nj ! np[0]
mulx $lo0, $n0, $m1 ! "tp[0]"*n0
mulx $aj, $m0, $alo ! ap[1]*bp[0]
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $lo1 ! np[0]*m1
umulxhi $nj, $m1, $hi1
ldx [$np+8], $nj ! np[1]
addcc $lo0, $lo1, $lo1
add $np, 16, $np
addxc %g0, $hi1, $hi1
mulx $nj, $m1, $nlo ! np[1]*m1
umulxhi $nj, $m1, $nj ! nhi=nj
ba .L1st
sub $num, 24, $cnt ! cnt=num-3
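! First pass: accumulate tp[j] = carry + ap[j]*bp[0] + np[j]*m1 word by
! word, with the two carry chains kept in ($hi0,$hi1); the "sub" above
! sits in the delay slot of the branch and initializes the counter.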
.align 16
.L1st:
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0
ldx [$ap+0], $aj ! ap[j]
addcc $nlo, $hi1, $lo1
add $ap, 8, $ap
addxc $nj, %g0, $hi1 ! nhi=nj
ldx [$np+0], $nj ! np[j]
mulx $aj, $m0, $alo ! ap[j]*bp[0]
add $np, 8, $np
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $nlo ! np[j]*m1
addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
umulxhi $nj, $m1, $nj ! nhi=nj
addxc %g0, $hi1, $hi1
stxa $lo1, [$tp]0xe2 ! tp[j-1]
add $tp, 8, $tp ! tp++
brnz,pt $cnt, .L1st
sub $cnt, 8, $cnt ! j--
!.L1st
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0 ! ahi=aj
addcc $nlo, $hi1, $lo1
addxc $nj, %g0, $hi1
addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
addxc %g0, $hi1, $hi1
stxa $lo1, [$tp]0xe2 ! tp[j-1]
add $tp, 8, $tp
addcc $hi0, $hi1, $hi1
addxc %g0, %g0, $ovf ! upmost overflow bit
stxa $hi1, [$tp]0xe2
add $tp, 8, $tp
ba .Louter
sub $num, 16, $i ! i=num-2
.align 16
.Louter:
ldx [$bp+0], $m0 ! m0=bp[i]
add $bp, 8, $bp
sub $ap, $num, $ap ! rewind
sub $np, $num, $np
sub $tp, $num, $tp
ldx [$ap+0], $aj ! ap[0]
ldx [$np+0], $nj ! np[0]
mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
ldx [$tp], $tj ! tp[0]
umulxhi $aj, $m0, $hi0
ldx [$ap+8], $aj ! ap[1]
addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
mulx $aj, $m0, $alo ! ap[1]*bp[i]
addxc %g0, $hi0, $hi0
mulx $lo0, $n0, $m1 ! tp[0]*n0
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $lo1 ! np[0]*m1
add $ap, 16, $ap
umulxhi $nj, $m1, $hi1
ldx [$np+8], $nj ! np[1]
add $np, 16, $np
addcc $lo1, $lo0, $lo1
mulx $nj, $m1, $nlo ! np[1]*m1
addxc %g0, $hi1, $hi1
umulxhi $nj, $m1, $nj ! nhi=nj
ba .Linner
sub $num, 24, $cnt ! cnt=num-3
.align 16
.Linner:
addcc $alo, $hi0, $lo0
ldx [$tp+8], $tj ! tp[j]
addxc $aj, %g0, $hi0 ! ahi=aj
ldx [$ap+0], $aj ! ap[j]
add $ap, 8, $ap
addcc $nlo, $hi1, $lo1
mulx $aj, $m0, $alo ! ap[j]*bp[i]
addxc $nj, %g0, $hi1 ! nhi=nj
ldx [$np+0], $nj ! np[j]
add $np, 8, $np
umulxhi $aj, $m0, $aj ! ahi=aj
addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
mulx $nj, $m1, $nlo ! np[j]*m1
addxc %g0, $hi0, $hi0
umulxhi $nj, $m1, $nj ! nhi=nj
addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
addxc %g0, $hi1, $hi1
stx $lo1, [$tp] ! tp[j-1]
add $tp, 8, $tp
brnz,pt $cnt, .Linner
sub $cnt, 8, $cnt
!.Linner
ldx [$tp+8], $tj ! tp[j]
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0 ! ahi=aj
addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
addxc %g0, $hi0, $hi0
addcc $nlo, $hi1, $lo1
addxc $nj, %g0, $hi1 ! nhi=nj
addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
addxc %g0, $hi1, $hi1
stx $lo1, [$tp] ! tp[j-1]
subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
addxccc $hi1, $hi0, $hi1
addxc %g0, %g0, $ovf
stx $hi1, [$tp+8]
add $tp, 16, $tp
brnz,pt $i, .Louter
sub $i, 8, $i
sub $ap, $num, $ap ! rewind
sub $np, $num, $np
sub $tp, $num, $tp
ba .Lsub
subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
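! .Lsub subtracts the modulus from tp[] into rp[], storing the two
! 32-bit halves of each word swapped (see "reverse order" below, and
! bn_flip_t4); .Lcopy then keeps either tp[] or the difference in rp[],
! depending on the final borrow and the overflow bit, zeroing tp[] as
! it goes.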
.align 16
.Lsub:
ldx [$tp], $tj
add $tp, 8, $tp
ldx [$np+0], $nj
add $np, 8, $np
subccc $tj, $nj, $t2 ! tp[j]-np[j]
srlx $tj, 32, $tj
srlx $nj, 32, $nj
subccc $tj, $nj, $t3
add $rp, 8, $rp
st $t2, [$rp-4] ! reverse order
st $t3, [$rp-8]
brnz,pt $cnt, .Lsub
sub $cnt, 8, $cnt
sub $np, $num, $np ! rewind
sub $tp, $num, $tp
sub $rp, $num, $rp
subccc $ovf, %g0, $ovf ! handle upmost overflow bit
ba .Lcopy
sub $num, 8, $cnt
.align 16
.Lcopy: ! conditional copy
ldx [$tp], $tj
ldx [$rp+0], $t2
stx %g0, [$tp] ! zap
add $tp, 8, $tp
movcs %icc, $tj, $t2
stx $t2, [$rp+0]
add $rp, 8, $rp
brnz $cnt, .Lcopy
sub $cnt, 8, $cnt
mov 1, %o0
ret
restore
.type bn_mul_mont_t4, #function
.size bn_mul_mont_t4, .-bn_mul_mont_t4
___
# int bn_mul_mont_gather5(
$rp="%o0";  # u64 *rp,
$ap="%o1";  # const u64 *ap,
$bp="%o2";  # const u64 *pwrtbl,
$np="%o3";  # const u64 *np,
$n0p="%o4"; # const BN_ULONG *n0,
$num="%o5"; # int num,  # caller ensures that num is >=3
            # int power);
$code.=<<___;
.globl bn_mul_mont_gather5_t4
.align 32
bn_mul_mont_gather5_t4:
add %sp, STACK_BIAS, %g4 ! real top of stack
sll $num, 3, $num ! size in bytes
add $num, 63, %g1
andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
sub %g4, %g1, %g1
andn %g1, 63, %g1 ! align at 64 byte
sub %g1, STACK_FRAME, %g1 ! new top of stack
sub %g1, %g4, %g1
LDPTR [%sp+STACK_7thARG], %g4 ! load power, 7th argument
save %sp, %g1, %sp
___
# +-------------------------------+<----- %sp
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# |       __int64 tmp[0]          |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<----- aligned at 64 bytes
# .                               .
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$ccr)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);
&load_ccr($bp,"%g4",$ccr);
&load_b($bp,$m0,"%o7"); # m0=bp[0]
$code.=<<___;
ld [$n0p+0], $t0 ! pull n0[0..1] value
ld [$n0p+4], $t1
add %sp, STACK_BIAS+STACK_FRAME, $tp
sllx $t1, 32, $n0
or $t0, $n0, $n0
ldx [$ap+0], $aj ! ap[0]
mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
umulxhi $aj, $m0, $hi0
ldx [$ap+8], $aj ! ap[1]
add $ap, 16, $ap
ldx [$np+0], $nj ! np[0]
mulx $lo0, $n0, $m1 ! "tp[0]"*n0
mulx $aj, $m0, $alo ! ap[1]*bp[0]
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $lo1 ! np[0]*m1
umulxhi $nj, $m1, $hi1
ldx [$np+8], $nj ! np[1]
addcc $lo0, $lo1, $lo1
add $np, 16, $np
addxc %g0, $hi1, $hi1
mulx $nj, $m1, $nlo ! np[1]*m1
umulxhi $nj, $m1, $nj ! nhi=nj
ba .L1st_g5
sub $num, 24, $cnt ! cnt=num-3
.align 16
.L1st_g5:
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0
ldx [$ap+0], $aj ! ap[j]
addcc $nlo, $hi1, $lo1
add $ap, 8, $ap
addxc $nj, %g0, $hi1 ! nhi=nj
ldx [$np+0], $nj ! np[j]
mulx $aj, $m0, $alo ! ap[j]*bp[0]
add $np, 8, $np
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $nlo ! np[j]*m1
addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
umulxhi $nj, $m1, $nj ! nhi=nj
addxc %g0, $hi1, $hi1
stxa $lo1, [$tp]0xe2 ! tp[j-1]
add $tp, 8, $tp ! tp++
brnz,pt $cnt, .L1st_g5
sub $cnt, 8, $cnt ! j--
!.L1st_g5
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0 ! ahi=aj
addcc $nlo, $hi1, $lo1
addxc $nj, %g0, $hi1
addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
addxc %g0, $hi1, $hi1
stxa $lo1, [$tp]0xe2 ! tp[j-1]
add $tp, 8, $tp
addcc $hi0, $hi1, $hi1
addxc %g0, %g0, $ovf ! upmost overflow bit
stxa $hi1, [$tp]0xe2
add $tp, 8, $tp
ba .Louter_g5
sub $num, 16, $i ! i=num-2
.align 16
.Louter_g5:
wr $ccr, %g0, %ccr
___
&load_b($bp,$m0); # m0=bp[i]
$code.=<<___;
sub $ap, $num, $ap ! rewind
sub $np, $num, $np
sub $tp, $num, $tp
ldx [$ap+0], $aj ! ap[0]
ldx [$np+0], $nj ! np[0]
mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
ldx [$tp], $tj ! tp[0]
umulxhi $aj, $m0, $hi0
ldx [$ap+8], $aj ! ap[1]
addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
mulx $aj, $m0, $alo ! ap[1]*bp[i]
addxc %g0, $hi0, $hi0
mulx $lo0, $n0, $m1 ! tp[0]*n0
umulxhi $aj, $m0, $aj ! ahi=aj
mulx $nj, $m1, $lo1 ! np[0]*m1
add $ap, 16, $ap
umulxhi $nj, $m1, $hi1
ldx [$np+8], $nj ! np[1]
add $np, 16, $np
addcc $lo1, $lo0, $lo1
mulx $nj, $m1, $nlo ! np[1]*m1
addxc %g0, $hi1, $hi1
umulxhi $nj, $m1, $nj ! nhi=nj
ba .Linner_g5
sub $num, 24, $cnt ! cnt=num-3
.align 16
.Linner_g5:
addcc $alo, $hi0, $lo0
ldx [$tp+8], $tj ! tp[j]
addxc $aj, %g0, $hi0 ! ahi=aj
ldx [$ap+0], $aj ! ap[j]
add $ap, 8, $ap
addcc $nlo, $hi1, $lo1
mulx $aj, $m0, $alo ! ap[j]*bp[i]
addxc $nj, %g0, $hi1 ! nhi=nj
ldx [$np+0], $nj ! np[j]
add $np, 8, $np
umulxhi $aj, $m0, $aj ! ahi=aj
addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
mulx $nj, $m1, $nlo ! np[j]*m1
addxc %g0, $hi0, $hi0
umulxhi $nj, $m1, $nj ! nhi=nj
addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
addxc %g0, $hi1, $hi1
stx $lo1, [$tp] ! tp[j-1]
add $tp, 8, $tp
brnz,pt $cnt, .Linner_g5
sub $cnt, 8, $cnt
!.Linner_g5
ldx [$tp+8], $tj ! tp[j]
addcc $alo, $hi0, $lo0
addxc $aj, %g0, $hi0 ! ahi=aj
addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
addxc %g0, $hi0, $hi0
addcc $nlo, $hi1, $lo1
addxc $nj, %g0, $hi1 ! nhi=nj
addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
addxc %g0, $hi1, $hi1
stx $lo1, [$tp] ! tp[j-1]
subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
addxccc $hi1, $hi0, $hi1
addxc %g0, %g0, $ovf
stx $hi1, [$tp+8]
add $tp, 16, $tp
brnz,pt $i, .Louter_g5
sub $i, 8, $i
sub $ap, $num, $ap ! rewind
sub $np, $num, $np
sub $tp, $num, $tp
ba .Lsub_g5
subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
.align 16
.Lsub_g5:
ldx [$tp], $tj
add $tp, 8, $tp
ldx [$np+0], $nj
add $np, 8, $np
subccc $tj, $nj, $t2 ! tp[j]-np[j]
srlx $tj, 32, $tj
srlx $nj, 32, $nj
subccc $tj, $nj, $t3
add $rp, 8, $rp
st $t2, [$rp-4] ! reverse order
st $t3, [$rp-8]
brnz,pt $cnt, .Lsub_g5
sub $cnt, 8, $cnt
sub $np, $num, $np ! rewind
sub $tp, $num, $tp
sub $rp, $num, $rp
subccc $ovf, %g0, $ovf ! handle upmost overflow bit
ba .Lcopy_g5
sub $num, 8, $cnt
.align 16
.Lcopy_g5: ! conditional copy
ldx [$tp], $tj
ldx [$rp+0], $t2
stx %g0, [$tp] ! zap
add $tp, 8, $tp
movcs %icc, $tj, $t2
stx $t2, [$rp+0]
add $rp, 8, $rp
brnz $cnt, .Lcopy_g5
sub $cnt, 8, $cnt
mov 1, %o0
ret
restore
.type bn_mul_mont_gather5_t4, #function
.size bn_mul_mont_gather5_t4, .-bn_mul_mont_gather5_t4
___
}
$code.=<<___;
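! bn_flip_t4 swaps the 32-bit halves of each 64-bit word while copying,
! converting between the 32-bit BN_ULONG layout and the 64-bit vector
! layout used by the routines above (judging by the register usage:
! %o0 = output, %o1 = input, %o2 = number of 64-bit words).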
.globl bn_flip_t4
.align 32
bn_flip_t4:
.Loop_flip:
ld [%o1+0], %o4
sub %o2, 1, %o2
ld [%o1+4], %o5
add %o1, 8, %o1
st %o5, [%o0+0]
st %o4, [%o0+4]
brnz %o2, .Loop_flip
add %o0, 8, %o0
retl
nop
.type bn_flip_t4, #function
.size bn_flip_t4, .-bn_flip_t4
.globl bn_flip_n_scatter5_t4
.align 32
bn_flip_n_scatter5_t4:
sll %o3, 3, %o3
srl %o1, 1, %o1
add %o3, %o2, %o2 ! &pwrtbl[pwr]
sub %o1, 1, %o1
.Loop_flip_n_scatter5:
ld [%o0+0], %o4 ! inp[i]
ld [%o0+4], %o5
add %o0, 8, %o0
sllx %o5, 32, %o5
or %o4, %o5, %o5
stx %o5, [%o2]
add %o2, 32*8, %o2
brnz %o1, .Loop_flip_n_scatter5
sub %o1, 1, %o1
retl
nop
.type bn_flip_n_scatter5_t4, #function
.size bn_flip_n_scatter5_t4, .-bn_flip_n_scatter5_t4
.globl bn_gather5_t4
.align 32
bn_gather5_t4:
___
&load_ccr("%o2","%o3","%g1");
$code.=<<___;
sub %o1, 1, %o1
.Loop_gather5:
___
&load_b("%o2","%g1");
$code.=<<___;
stx %g1, [%o0]
add %o0, 8, %o0
brnz %o1, .Loop_gather5
sub %o1, 1, %o1
retl
nop
.type bn_gather5_t4, #function
.size bn_gather5_t4, .-bn_gather5_t4
.asciz "Montgomery Multiplication for SPARC T4, David S. Miller, Andy Polyakov"
.align 4
___
&emit_assembler();
close STDOUT or die "error closing STDOUT: $!";