#! /usr/bin/env perl
# Copyright 2012-2021 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2012.
#
# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
# onward. There are three new instructions used here: umulxhi,
# addxc[cc] and initializing store. On T3 RSA private key operations
# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
# lengths. This is without a dedicated squaring procedure. On T4 the
# corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
# for reference purposes, because T4 has dedicated Montgomery
# multiplication and squaring *instructions* that deliver even more.
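
# The code below is a classic word-serial Montgomery multiplication:
# for each 64-bit limb of bp[] it accumulates ap[]*bp[i] into tp[],
# derives m1 = tp[0]*n0 mod 2^64, adds np[]*m1 so that the lowest limb
# of the sum becomes zero, and shifts tp[] down by one limb. The value
# left in tp[] (plus the carried-out bit) is < 2*np, so one conditional
# subtraction of np[] (.Lsub and .Lcopy at the bottom) completes the
# reduction.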

$output = pop and open STDOUT,">$output";

$frame = "STACK_FRAME";
$bias = "STACK_BIAS";

$code.=<<___;
#ifndef __ASSEMBLER__
# define __ASSEMBLER__ 1
#endif
#include "crypto/sparc_arch.h"

#ifdef __arch64__
.register   %g2,#scratch
.register   %g3,#scratch
#endif

.section    ".text",#alloc,#execinstr
___

($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
    (map("%g$_",(1..5)),map("%o$_",(0..5,7)));

# int bn_mul_mont(
$rp="%o0";  # BN_ULONG *rp,
$ap="%o1";  # const BN_ULONG *ap,
$bp="%o2";  # const BN_ULONG *bp,
$np="%o3";  # const BN_ULONG *np,
$n0p="%o4"; # const BN_ULONG *n0,
$num="%o5"; # int num);  # caller ensures that num is even
            # and >=6
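
# Note that BN_ULONG is 32 bits here, while the multiplier operates on
# 64-bit limbs: each pair of 32-bit input words is packed into one
# 64-bit value with sllx/or (the "converted ap[]"/"converted np[]" in
# the stack layout below), and num is scaled to a byte count early on
# with sll $num, 2.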

$code.=<<___;
.globl  bn_mul_mont_vis3
.align  32
bn_mul_mont_vis3:
    add     %sp, $bias, %g4     ! real top of stack
    sll     $num, 2, $num       ! size in bytes
    add     $num, 63, %g5
    andn    %g5, 63, %g5        ! buffer size rounded up to 64 bytes
    add     %g5, %g5, %g1
    add     %g5, %g1, %g1       ! 3*buffer size
    sub     %g4, %g1, %g1
    andn    %g1, 63, %g1        ! align at 64 byte
    sub     %g1, $frame, %g1    ! new top of stack
    sub     %g1, %g4, %g1
    save    %sp, %g1, %sp
___

# +-------------------------------+<-----  %sp
# .                               .
# +-------------------------------+<-----  aligned at 64 bytes
# | __int64 tmp[0]                |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<-----  aligned at 64 bytes
# | __int64 ap[1..0]              |  converted ap[]
# +-------------------------------+
# | __int64 np[1..0]              |  converted np[]
# +-------------------------------+
# | __int64 ap[3..2]              |
# .                               .
# .                               .
# +-------------------------------+
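
# The save above shifted the register window, so the incoming arguments
# that arrived in %o0-%o5 are now visible as %i0-%i5, which is why the
# argument names are simply rebound below. The converted ap[] and np[]
# limbs are stored interleaved (ap, np, ap, np, ...) so the inner loops
# can walk both arrays with a single pointer, $anp.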
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);

$code.=<<___;
    ld      [$n0p+0], $t0       ! pull n0[0..1] value
    add     %sp, $bias+$frame, $tp
    ld      [$n0p+4], $t1
    add     $tp, %g5, $anp
    ld      [$bp+0], $t2        ! m0=bp[0]
    sllx    $t1, 32, $n0
    ld      [$bp+4], $t3
    or      $t0, $n0, $n0
    add     $bp, 8, $bp

    ld      [$ap+0], $t0        ! ap[0]
    sllx    $t3, 32, $m0
    ld      [$ap+4], $t1
    or      $t2, $m0, $m0

    ld      [$ap+8], $t2        ! ap[1]
    sllx    $t1, 32, $aj
    ld      [$ap+12], $t3
    or      $t0, $aj, $aj
    add     $ap, 16, $ap
    stx     $aj, [$anp]         ! converted ap[0]

    mulx    $aj, $m0, $lo0      ! ap[0]*bp[0]
    umulxhi $aj, $m0, $hi0

    ld      [$np+0], $t0        ! np[0]
    sllx    $t3, 32, $aj
    ld      [$np+4], $t1
    or      $t2, $aj, $aj

    ld      [$np+8], $t2        ! np[1]
    sllx    $t1, 32, $nj
    ld      [$np+12], $t3
    or      $t0, $nj, $nj
    add     $np, 16, $np
    stx     $nj, [$anp+8]       ! converted np[0]

    mulx    $lo0, $n0, $m1      ! "tp[0]"*n0
    stx     $aj, [$anp+16]      ! converted ap[1]

    mulx    $aj, $m0, $alo      ! ap[1]*bp[0]
    umulxhi $aj, $m0, $aj       ! ahi=aj

    mulx    $nj, $m1, $lo1      ! np[0]*m1
    umulxhi $nj, $m1, $hi1

    sllx    $t3, 32, $nj
    or      $t2, $nj, $nj
    stx     $nj, [$anp+24]      ! converted np[1]
    add     $anp, 32, $anp

    addcc   $lo0, $lo1, $lo1
    addxc   %g0, $hi1, $hi1

    mulx    $nj, $m1, $nlo      ! np[1]*m1
    umulxhi $nj, $m1, $nj       ! nhi=nj

    ba      .L1st
    sub     $num, 24, $cnt      ! cnt=num-3
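
! .L1st processes the remaining limbs of the first outer iteration,
! converting the 32-bit input words to 64-bit limbs on the fly while
! accumulating ap[j]*bp[0] and np[j]*m1 into tp[]; cnt counts down in
! bytes, 8 per 64-bit limb.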
.align  16
.L1st:
    ld      [$ap+0], $t0        ! ap[j]
    addcc   $alo, $hi0, $lo0
    ld      [$ap+4], $t1
    addxc   $aj, %g0, $hi0

    sllx    $t1, 32, $aj
    add     $ap, 8, $ap
    or      $t0, $aj, $aj
    stx     $aj, [$anp]         ! converted ap[j]

    ld      [$np+0], $t2        ! np[j]
    addcc   $nlo, $hi1, $lo1
    ld      [$np+4], $t3
    addxc   $nj, %g0, $hi1      ! nhi=nj

    sllx    $t3, 32, $nj
    add     $np, 8, $np
    mulx    $aj, $m0, $alo      ! ap[j]*bp[0]
    or      $t2, $nj, $nj
    umulxhi $aj, $m0, $aj       ! ahi=aj
    stx     $nj, [$anp+8]       ! converted np[j]
    add     $anp, 16, $anp      ! anp++

    mulx    $nj, $m1, $nlo      ! np[j]*m1
    addcc   $lo0, $lo1, $lo1    ! np[j]*m1+ap[j]*bp[0]
    umulxhi $nj, $m1, $nj       ! nhi=nj
    addxc   %g0, $hi1, $hi1
    stx     $lo1, [$tp]         ! tp[j-1]
    add     $tp, 8, $tp         ! tp++

    brnz,pt $cnt, .L1st
    sub     $cnt, 8, $cnt       ! j--
!.L1st

    addcc   $alo, $hi0, $lo0
    addxc   $aj, %g0, $hi0      ! ahi=aj

    addcc   $nlo, $hi1, $lo1
    addxc   $nj, %g0, $hi1
    addcc   $lo0, $lo1, $lo1    ! np[j]*m1+ap[j]*bp[0]
    addxc   %g0, $hi1, $hi1
    stx     $lo1, [$tp]         ! tp[j-1]
    add     $tp, 8, $tp

    addcc   $hi0, $hi1, $hi1
    addxc   %g0, %g0, $ovf      ! upmost overflow bit
    stx     $hi1, [$tp]
    add     $tp, 8, $tp

    ba      .Louter
    sub     $num, 16, $i        ! i=num-2
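
! .Louter performs the same accumulation for each remaining limb of
! bp[]: tp and anp are rewound (anp by 2*num bytes, since ap[] and
! np[] are interleaved), the previous tp[] contents are folded into
! the running sum, and the carry out of the top limb is kept in ovf.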
.align  16
.Louter:
    ld      [$bp+0], $t2        ! m0=bp[i]
    ld      [$bp+4], $t3

    sub     $anp, $num, $anp    ! rewind
    sub     $tp, $num, $tp
    sub     $anp, $num, $anp
    add     $bp, 8, $bp

    sllx    $t3, 32, $m0
    ldx     [$anp+0], $aj       ! ap[0]
    or      $t2, $m0, $m0
    ldx     [$anp+8], $nj       ! np[0]

    mulx    $aj, $m0, $lo0      ! ap[0]*bp[i]
    ldx     [$tp], $tj          ! tp[0]
    umulxhi $aj, $m0, $hi0
    ldx     [$anp+16], $aj      ! ap[1]
    addcc   $lo0, $tj, $lo0     ! ap[0]*bp[i]+tp[0]
    mulx    $aj, $m0, $alo      ! ap[1]*bp[i]
    addxc   %g0, $hi0, $hi0
    mulx    $lo0, $n0, $m1      ! tp[0]*n0
    umulxhi $aj, $m0, $aj       ! ahi=aj
    mulx    $nj, $m1, $lo1      ! np[0]*m1
    umulxhi $nj, $m1, $hi1
    ldx     [$anp+24], $nj      ! np[1]
    add     $anp, 32, $anp
    addcc   $lo1, $lo0, $lo1
    mulx    $nj, $m1, $nlo      ! np[1]*m1
    addxc   %g0, $hi1, $hi1
    umulxhi $nj, $m1, $nj       ! nhi=nj

    ba      .Linner
    sub     $num, 24, $cnt      ! cnt=num-3
.align  16
.Linner:
    addcc   $alo, $hi0, $lo0
    ldx     [$tp+8], $tj        ! tp[j]
    addxc   $aj, %g0, $hi0      ! ahi=aj
    ldx     [$anp+0], $aj       ! ap[j]
    addcc   $nlo, $hi1, $lo1
    mulx    $aj, $m0, $alo      ! ap[j]*bp[i]
    addxc   $nj, %g0, $hi1      ! nhi=nj
    ldx     [$anp+8], $nj       ! np[j]
    add     $anp, 16, $anp
    umulxhi $aj, $m0, $aj       ! ahi=aj
    addcc   $lo0, $tj, $lo0     ! ap[j]*bp[i]+tp[j]
    mulx    $nj, $m1, $nlo      ! np[j]*m1
    addxc   %g0, $hi0, $hi0
    umulxhi $nj, $m1, $nj       ! nhi=nj
    addcc   $lo1, $lo0, $lo1    ! np[j]*m1+ap[j]*bp[i]+tp[j]
    addxc   %g0, $hi1, $hi1
    stx     $lo1, [$tp]         ! tp[j-1]
    add     $tp, 8, $tp

    brnz,pt $cnt, .Linner
    sub     $cnt, 8, $cnt
!.Linner

    ldx     [$tp+8], $tj        ! tp[j]
    addcc   $alo, $hi0, $lo0
    addxc   $aj, %g0, $hi0      ! ahi=aj
    addcc   $lo0, $tj, $lo0     ! ap[j]*bp[i]+tp[j]
    addxc   %g0, $hi0, $hi0

    addcc   $nlo, $hi1, $lo1
    addxc   $nj, %g0, $hi1      ! nhi=nj
    addcc   $lo1, $lo0, $lo1    ! np[j]*m1+ap[j]*bp[i]+tp[j]
    addxc   %g0, $hi1, $hi1
    stx     $lo1, [$tp]         ! tp[j-1]

    subcc   %g0, $ovf, %g0      ! move upmost overflow to CCR.xcc
    addxccc $hi1, $hi0, $hi1
    addxc   %g0, %g0, $ovf
    stx     $hi1, [$tp+8]
    add     $tp, 16, $tp

    brnz,pt $i, .Louter
    sub     $i, 8, $i

    sub     $anp, $num, $anp    ! rewind
    sub     $tp, $num, $tp
    sub     $anp, $num, $anp
    ba      .Lsub
    subcc   $num, 8, $cnt       ! cnt=num-1 and clear CCR.xcc
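
! .Lsub computes tp[] minus np[] limb by limb as two chained 32-bit
! subtractions, leaving the accumulated borrow in the condition codes;
! the 32-bit halves of each limb are stored to rp[] swapped, and
! .Lcopy swaps them back into BN_ULONG order.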
.align  16
.Lsub:
    ldx     [$tp], $tj
    add     $tp, 8, $tp
    ldx     [$anp+8], $nj
    add     $anp, 16, $anp
    subccc  $tj, $nj, $t2       ! tp[j]-np[j]
    srlx    $tj, 32, $tj
    srlx    $nj, 32, $nj
    subccc  $tj, $nj, $t3
    add     $rp, 8, $rp
    st      $t2, [$rp-4]        ! reverse order
    st      $t3, [$rp-8]
    brnz,pt $cnt, .Lsub
    sub     $cnt, 8, $cnt

    sub     $anp, $num, $anp    ! rewind
    sub     $tp, $num, $tp
    sub     $anp, $num, $anp
    sub     $rp, $num, $rp

    subccc  $ovf, %g0, $ovf     ! handle upmost overflow bit
    ba      .Lcopy
    sub     $num, 8, $cnt
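
! .Lcopy copies tp[] to rp[] when the subtraction borrowed past the
! overflow bit in ovf (carry set), and otherwise keeps the difference
! already in rp[]; it also zeroes tp[] and the converted operands to
! scrub them from the stack.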
.align  16
.Lcopy:                         ! conditional copy
    ld      [$tp+0], $t0
    ld      [$tp+4], $t1
    ld      [$rp+0], $t2
    ld      [$rp+4], $t3
    stx     %g0, [$tp]          ! zap
    add     $tp, 8, $tp
    stx     %g0, [$anp]         ! zap
    stx     %g0, [$anp+8]
    add     $anp, 16, $anp
    movcs   %icc, $t0, $t2
    movcs   %icc, $t1, $t3
    st      $t3, [$rp+0]        ! flip order
    st      $t2, [$rp+4]
    add     $rp, 8, $rp
    brnz    $cnt, .Lcopy
    sub     $cnt, 8, $cnt

    mov     1, %o0
    ret
    restore
.type   bn_mul_mont_vis3, #function
.size   bn_mul_mont_vis3, .-bn_mul_mont_vis3
.asciz  "Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
.align  4
___

# The purpose of this subroutine is to explicitly encode VIS instructions,
# so that the module can be compiled without specifying VIS extensions on
# the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. The idea is to
# reserve the option of producing a "universal" binary and letting the
# programmer detect at run-time whether the current CPU is VIS-capable.
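# For example, with the opf values below, "umulxhi %o1,%o2,%o3" (rs1=9,
# rs2=10, rd=11 after the register-bank bias is applied) is emitted as
# ".word 0x97b242ca", i.e. 0x81b00000|11<<25|9<<14|0x016<<5|10.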
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = ( "addxc"   => 0x011,
               "addxccc" => 0x013,
               "umulxhi" => 0x016 );

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
        foreach ($rs1,$rs2,$rd) {
            return $ref if (!/%([goli])([0-9])/);
            $_=$bias{$1}+$2;
        }

        return sprintf ".word\t0x%08x !%s",
                       0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
                       $ref;
    } else {
        return $ref;
    }
}

foreach (split("\n",$code)) {
    s/\`([^\`]*)\`/eval $1/ge;

    s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
        &unvis3($1,$2,$3,$4)
    /ge;

    print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";