vis3-mont.pl

#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2012.
#
# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
# onward. There are three new instructions used here: umulxhi,
# addxc[cc] and initializing store. On T3 RSA private key operations
# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
# lengths. This is without a dedicated squaring procedure. On T4 the
# corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
# for reference purposes, because T4 has dedicated Montgomery
# multiplication and squaring *instructions* that deliver even more.
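#
# For reference: umulxhi yields the upper 64 bits of an unsigned
# 64x64-bit multiply, while addxc/addxccc add with the carry bit from
# %ccr.xcc as input (addxccc also produces a carry out); together they
# are what makes the straightforward 64-bit-limb loops below possible.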
$bits=32;
for (@ARGV)	{ $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64)	{ $bias=2047; $frame=192; }
else		{ $bias=0;    $frame=112; }

$code.=<<___ if ($bits==64);
.register	%g2,#scratch
.register	%g3,#scratch
___
$code.=<<___;
.section	".text",#alloc,#execinstr
___
($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
	(map("%g$_",(1..5)),map("%o$_",(0..5,7)));

# int bn_mul_mont(
$rp="%o0";	# BN_ULONG *rp,
$ap="%o1";	# const BN_ULONG *ap,
$bp="%o2";	# const BN_ULONG *bp,
$np="%o3";	# const BN_ULONG *np,
$n0p="%o4";	# const BN_ULONG *n0,
$num="%o5";	# int num);   # caller ensures that num is even
		#             # and >=6
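#
# The routine follows the usual Montgomery convention: with num 32-bit
# words (paired into 64-bit limbs) and n0 = -np^-1 mod 2^64, it should
# compute rp[] = ap[]*bp[]*2^(-32*num) mod np[] and return 1. In rough
# pseudo-code, for each 64-bit limb bp[i]:
#
#	tp += ap*bp[i];  m1 = tp[0]*n0 mod 2^64;
#	tp += np*m1;     tp >>= 64;		# division is exact
#
# with a single conditional subtraction of np at the end.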
$code.=<<___;
.globl	bn_mul_mont_vis3
.align	32
bn_mul_mont_vis3:
	add	%sp, $bias, %g4		! real top of stack
	sll	$num, 2, $num		! size in bytes
	add	$num, 63, %g5
	andn	%g5, 63, %g5		! buffer size rounded up to 64 bytes
	add	%g5, %g5, %g1
	add	%g5, %g1, %g1		! 3*buffer size
	sub	%g4, %g1, %g1
	andn	%g1, 63, %g1		! align at 64 byte
	sub	%g1, $frame, %g1	! new top of stack
	sub	%g1, %g4, %g1
	save	%sp, %g1, %sp
___
#	+-------------------------------+<-----	%sp
#	.				.
#	+-------------------------------+<-----	aligned at 64 bytes
#	| __int64 tmp[0]		|
#	+-------------------------------+
#	.				.
#	.				.
#	+-------------------------------+<-----	aligned at 64 bytes
#	| __int64 ap[1..0]		|	converted ap[]
#	+-------------------------------+
#	| __int64 np[1..0]		|	converted np[]
#	+-------------------------------+
#	| __int64 ap[3..2]		|
#	.				.
#	.				.
#	+-------------------------------+
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);
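# Incoming ap[] and np[] consist of 32-bit words; during the first pass
# they are repacked into 64-bit limbs and stored interleaved right above
# the tp[] buffer (ap limb at [$anp+16*j], np limb at [$anp+16*j+8]), so
# later outer-loop iterations can fetch both with 64-bit loads.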
$code.=<<___;
	ld	[$n0p+0], $t0		! pull n0[0..1] value
	add	%sp, $bias+$frame, $tp
	ld	[$n0p+4], $t1
	add	$tp, %g5, $anp
	ld	[$bp+0], $t2		! m0=bp[0]
	sllx	$t1, 32, $n0
	ld	[$bp+4], $t3
	or	$t0, $n0, $n0
	add	$bp, 8, $bp
	ld	[$ap+0], $t0		! ap[0]
	sllx	$t3, 32, $m0
	ld	[$ap+4], $t1
	or	$t2, $m0, $m0
	ld	[$ap+8], $t2		! ap[1]
	sllx	$t1, 32, $aj
	ld	[$ap+12], $t3
	or	$t0, $aj, $aj
	add	$ap, 16, $ap
	stx	$aj, [$anp]		! converted ap[0]
	mulx	$aj, $m0, $lo0		! ap[0]*bp[0]
	umulxhi	$aj, $m0, $hi0
	ld	[$np+0], $t0		! np[0]
	sllx	$t3, 32, $aj
	ld	[$np+4], $t1
	or	$t2, $aj, $aj
	ld	[$np+8], $t2		! np[1]
	sllx	$t1, 32, $nj
	ld	[$np+12], $t3
	or	$t0, $nj, $nj
	add	$np, 16, $np
	stx	$nj, [$anp+8]		! converted np[0]
	mulx	$lo0, $n0, $m1		! "tp[0]"*n0
	stx	$aj, [$anp+16]		! converted ap[1]
	mulx	$aj, $m0, $alo		! ap[1]*bp[0]
	umulxhi	$aj, $m0, $aj		! ahi=aj
	mulx	$nj, $m1, $lo1		! np[0]*m1
	umulxhi	$nj, $m1, $hi1
	sllx	$t3, 32, $nj
	or	$t2, $nj, $nj
	stx	$nj, [$anp+24]		! converted np[1]
	add	$anp, 32, $anp
	addcc	$lo0, $lo1, $lo1
	addxc	%g0, $hi1, $hi1
	mulx	$nj, $m1, $nlo		! np[1]*m1
	umulxhi	$nj, $m1, $nj		! nhi=nj
	ba	.L1st
	sub	$num, 24, $cnt		! cnt=num-3
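! First pass (multiplication by bp[0]): tp[] accumulates
! ap[]*bp[0]+np[]*m1 with m1 = ap[0]*bp[0]*n0 mod 2^64, so the least
! significant limb comes out zero and is never stored; the stores in
! .L1st therefore land at tp[j-1].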
.align	16
.L1st:
	ld	[$ap+0], $t0		! ap[j]
	addcc	$alo, $hi0, $lo0
	ld	[$ap+4], $t1
	addxc	$aj, %g0, $hi0
	sllx	$t1, 32, $aj
	add	$ap, 8, $ap
	or	$t0, $aj, $aj
	stx	$aj, [$anp]		! converted ap[j]
	ld	[$np+0], $t2		! np[j]
	addcc	$nlo, $hi1, $lo1
	ld	[$np+4], $t3
	addxc	$nj, %g0, $hi1		! nhi=nj
	sllx	$t3, 32, $nj
	add	$np, 8, $np
	mulx	$aj, $m0, $alo		! ap[j]*bp[0]
	or	$t2, $nj, $nj
	umulxhi	$aj, $m0, $aj		! ahi=aj
	stx	$nj, [$anp+8]		! converted np[j]
	add	$anp, 16, $anp		! anp++
	mulx	$nj, $m1, $nlo		! np[j]*m1
	addcc	$lo0, $lo1, $lo1	! np[j]*m1+ap[j]*bp[0]
	umulxhi	$nj, $m1, $nj		! nhi=nj
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]		! tp[j-1]
	add	$tp, 8, $tp		! tp++
	brnz,pt	$cnt, .L1st
	sub	$cnt, 8, $cnt		! j--
!.L1st
	addcc	$alo, $hi0, $lo0
	addxc	$aj, %g0, $hi0		! ahi=aj
	addcc	$nlo, $hi1, $lo1
	addxc	$nj, %g0, $hi1
	addcc	$lo0, $lo1, $lo1	! np[j]*m1+ap[j]*bp[0]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]		! tp[j-1]
	add	$tp, 8, $tp
	addcc	$hi0, $hi1, $hi1
	addxc	%g0, %g0, $ovf		! upmost overflow bit
	stx	$hi1, [$tp]
	add	$tp, 8, $tp
	ba	.Louter
	sub	$num, 16, $i		! i=num-2
.align	16
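! Outer loop over the remaining bp[i]: tp[] becomes
! (tp[] + ap[]*bp[i] + np[]*m1)/2^64 with m1 = (tp[0]+ap[0]*bp[i])*n0
! mod 2^64; ap[] and np[] are read back as 64-bit limbs from the
! converted copy built during the first pass.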
.Louter:
	ld	[$bp+0], $t2		! m0=bp[i]
	ld	[$bp+4], $t3
	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp
	add	$bp, 8, $bp
	sllx	$t3, 32, $m0
	ldx	[$anp+0], $aj		! ap[0]
	or	$t2, $m0, $m0
	ldx	[$anp+8], $nj		! np[0]
	mulx	$aj, $m0, $lo0		! ap[0]*bp[i]
	ldx	[$tp], $tj		! tp[0]
	umulxhi	$aj, $m0, $hi0
	ldx	[$anp+16], $aj		! ap[1]
	addcc	$lo0, $tj, $lo0		! ap[0]*bp[i]+tp[0]
	mulx	$aj, $m0, $alo		! ap[1]*bp[i]
	addxc	%g0, $hi0, $hi0
	mulx	$lo0, $n0, $m1		! tp[0]*n0
	umulxhi	$aj, $m0, $aj		! ahi=aj
	mulx	$nj, $m1, $lo1		! np[0]*m1
	umulxhi	$nj, $m1, $hi1
	ldx	[$anp+24], $nj		! np[1]
	add	$anp, 32, $anp
	addcc	$lo1, $lo0, $lo1
	mulx	$nj, $m1, $nlo		! np[1]*m1
	addxc	%g0, $hi1, $hi1
	umulxhi	$nj, $m1, $nj		! nhi=nj
	ba	.Linner
	sub	$num, 24, $cnt		! cnt=num-3
.align	16
.Linner:
	addcc	$alo, $hi0, $lo0
	ldx	[$tp+8], $tj		! tp[j]
	addxc	$aj, %g0, $hi0		! ahi=aj
	ldx	[$anp+0], $aj		! ap[j]
	addcc	$nlo, $hi1, $lo1
	mulx	$aj, $m0, $alo		! ap[j]*bp[i]
	addxc	$nj, %g0, $hi1		! nhi=nj
	ldx	[$anp+8], $nj		! np[j]
	add	$anp, 16, $anp
	umulxhi	$aj, $m0, $aj		! ahi=aj
	addcc	$lo0, $tj, $lo0		! ap[j]*bp[i]+tp[j]
	mulx	$nj, $m1, $nlo		! np[j]*m1
	addxc	%g0, $hi0, $hi0
	umulxhi	$nj, $m1, $nj		! nhi=nj
	addcc	$lo1, $lo0, $lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]		! tp[j-1]
	add	$tp, 8, $tp
	brnz,pt	$cnt, .Linner
	sub	$cnt, 8, $cnt
!.Linner
	ldx	[$tp+8], $tj		! tp[j]
	addcc	$alo, $hi0, $lo0
	addxc	$aj, %g0, $hi0		! ahi=aj
	addcc	$lo0, $tj, $lo0		! ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi0, $hi0
	addcc	$nlo, $hi1, $lo1
	addxc	$nj, %g0, $hi1		! nhi=nj
	addcc	$lo1, $lo0, $lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]		! tp[j-1]
	subcc	%g0, $ovf, %g0		! move upmost overflow to CCR.xcc
	addxccc	$hi1, $hi0, $hi1
	addxc	%g0, %g0, $ovf
	stx	$hi1, [$tp+8]
	add	$tp, 16, $tp
	brnz,pt	$i, .Louter
	sub	$i, 8, $i
	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp
	ba	.Lsub
	subcc	$num, 8, $cnt		! cnt=num-1 and clear CCR.xcc
.align	16
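! Conditional final subtraction: tp[]-np[] is written to rp[] (32-bit
! halves in reversed order, fixed up in .Lcopy); the borrow from this
! subtraction together with the saved overflow bit then selects whether
! rp[] keeps the difference or is refreshed from tp[].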
.Lsub:
	ldx	[$tp], $tj
	add	$tp, 8, $tp
	ldx	[$anp+8], $nj
	add	$anp, 16, $anp
	subccc	$tj, $nj, $t2		! tp[j]-np[j]
	srlx	$tj, 32, $tj
	srlx	$nj, 32, $nj
	subccc	$tj, $nj, $t3
	add	$rp, 8, $rp
	st	$t2, [$rp-4]		! reverse order
	st	$t3, [$rp-8]
	brnz,pt	$cnt, .Lsub
	sub	$cnt, 8, $cnt
	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp
	sub	$rp, $num, $rp
	subc	$ovf, %g0, $ovf		! handle upmost overflow bit
	and	$tp, $ovf, $ap
	andn	$rp, $ovf, $np
	or	$np, $ap, $ap		! ap=borrow?tp:rp
	ba	.Lcopy
	sub	$num, 8, $cnt
.align	16
.Lcopy:					! copy or in-place refresh
	ld	[$ap+0], $t2
	ld	[$ap+4], $t3
	add	$ap, 8, $ap
	stx	%g0, [$tp]		! zap
	add	$tp, 8, $tp
	stx	%g0, [$anp]		! zap
	stx	%g0, [$anp+8]
	add	$anp, 16, $anp
	st	$t3, [$rp+0]		! flip order
	st	$t2, [$rp+4]
	add	$rp, 8, $rp
	brnz	$cnt, .Lcopy
	sub	$cnt, 8, $cnt
	mov	1, %o0
	ret
	restore
.type	bn_mul_mont_vis3, #function
.size	bn_mul_mont_vis3, .-bn_mul_mont_vis3
.asciz	"Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___
# The purpose of this subroutine is to explicitly encode VIS instructions,
# so that the module can be compiled without having to specify VIS
# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
# The idea is to preserve the option of producing a "universal" binary
# and letting the programmer detect at run-time whether the current CPU
# is VIS-capable.
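#
# The .word emitted below follows the SPARC V9 three-register format used
# by the VIS opcode space (op=2, op3=0x36 in the 0x81b00000 template): rd
# in bits 29:25, rs1 in bits 18:14, rs2 in bits 4:0, and the sub-opcode
# opf in bits 13:5, which is where the %visopf values go.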
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unvis3($1,$2,$3,$4)
	 /ge;

	print $_,"\n";
}

close STDOUT;