#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# On PA-7100LC this module performs ~90-50% better, less for longer
# keys, than code generated by gcc 3.2 for PA-RISC 1.1. Latter means
# that compiler utilized xmpyu instruction to perform 32x32=64-bit
# multiplication, which in turn means that "baseline" performance was
# optimal in respect to instruction set capabilities. Fair comparison
# with vendor compiler is problematic, because OpenSSL doesn't define
# BN_LLONG [presumably] for historical reasons, which drives compiler
# toward 4 times 16x16=32-bit multiplicatons [plus complementary
# shifts and additions] instead. This means that you should observe
# several times improvement over code generated by vendor compiler
# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
# improvement coefficient was never collected on PA-7100LC, or any
# other 1.1 CPU, because I don't have access to such machine with
# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
# of ~5x on PA-8600.
#
# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
# reportedly ~2x faster than vendor compiler generated code [according
# to comment in pa-risc2[W].s]. Here comes a catch. Execution core of
# this implementation is actually 32-bit one, in the sense that it
# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
# 64-bit BN_LONGs... How do they interoperate then? No problem. This
# module picks halves of 64-bit values in reverse order and pretends
# they were 32-bit BN_LONGs. But can 32-bit core compete with "pure"
# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
# i.e. there is no "wider" multiplication like on most other 64-bit
# platforms. This means that even being effectively 32-bit, this
# implementation performs "64-bit" computational task in same amount
# of arithmetic operations, most notably multiplications. It requires
# more memory references, most notably to tp[num], but this doesn't
# seem to exhaust memory port capacity. And indeed, dedicated PA-RISC
# 2.0 code path, provides virtually same performance as pa-risc2[W].s:
# it's ~10% better for shortest key length and ~10% worse for longest
# one.
#
# In case it wasn't clear. The module has two distinct code paths:
# PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit
# additions and 64-bit integer loads, not to mention specific
# instruction scheduling. In 64-bit build naturally only 2.0 code path
# is assembled. In 32-bit application context both code paths are
# assembled, PA-RISC 2.0 CPU is detected at run-time and proper path
# is taken automatically. Also, in 32-bit build the module imposes
# couple of limitations: vector lengths has to be even and vector
# addresses has to be 64-bit aligned. Normally neither is a problem:
# most common key lengths are even and vectors are commonly malloc-ed,
# which ensures alignment.
#
# Special thanks to polarhome.com for providing HP-UX account on
# PA-RISC 1.1 machine, and to correspondent who chose to remain
# anonymous for testing the code on PA-RISC 2.0 machine.

# Locate the directory this script lives in, so sibling config
# headers can be found relative to it.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;

$flavour = shift;       # e.g. "parisc64" selects the 64-bit ABI
$output  = shift;       # destination file for generated assembly

# All generated assembly is printed to STDOUT, redirected to $output.
if (defined $output) {
    open STDOUT, '>', $output or die "can't open $output: $!";
}
  64. if ($flavour =~ /64/) {
  65. $LEVEL ="2.0W";
  66. $SIZE_T =8;
  67. $FRAME_MARKER =80;
  68. $SAVED_RP =16;
  69. $PUSH ="std";
  70. $PUSHMA ="std,ma";
  71. $POP ="ldd";
  72. $POPMB ="ldd,mb";
  73. $BN_SZ =$SIZE_T;
  74. } else {
  75. $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
  76. $SIZE_T =4;
  77. $FRAME_MARKER =48;
  78. $SAVED_RP =20;
  79. $PUSH ="stw";
  80. $PUSHMA ="stwm";
  81. $POP ="ldw";
  82. $POPMB ="ldwm";
  83. $BN_SZ =$SIZE_T;
  84. if (open CONF,"<${dir}../../opensslconf.h") {
  85. while(<CONF>) {
  86. if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
  87. $BN_SZ=8;
  88. $LEVEL="2.0";
  89. last;
  90. }
  91. }
  92. close CONF;
  93. }
  94. }
  95. $FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
  96. # [+ argument transfer]
  97. $LOCALS=$FRAME-$FRAME_MARKER;
  98. $FRAME+=32; # local variables
  99. $tp="%r31";
  100. $ti1="%r29";
  101. $ti0="%r28";
  102. $rp="%r26";
  103. $ap="%r25";
  104. $bp="%r24";
  105. $np="%r23";
  106. $n0="%r22"; # passed through stack in 32-bit
  107. $num="%r21"; # passed through stack in 32-bit
  108. $idx="%r20";
  109. $arrsz="%r19";
  110. $nm1="%r7";
  111. $nm0="%r6";
  112. $ab1="%r5";
  113. $ab0="%r4";
  114. $fp="%r3";
  115. $hi1="%r2";
  116. $hi0="%r1";
  117. $xfer=$n0; # accomodates [-16..15] offset in fld[dw]s
  118. $fm0="%fr4"; $fti=$fm0;
  119. $fbi="%fr5L";
  120. $fn0="%fr5R";
  121. $fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
  122. $fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
  123. $code=<<___;
  124. .LEVEL $LEVEL
  125. .SPACE \$TEXT\$
  126. .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
  127. .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
  128. .ALIGN 64
  129. bn_mul_mont
  130. .PROC
  131. .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
  132. .ENTRY
  133. $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
  134. $PUSHMA %r3,$FRAME(%sp)
  135. $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
  136. $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
  137. $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
  138. $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
  139. $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
  140. $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
  141. $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
  142. ldo -$FRAME(%sp),$fp
  143. ___
  144. $code.=<<___ if ($SIZE_T==4);
  145. ldw `-$FRAME_MARKER-4`($fp),$n0
  146. ldw `-$FRAME_MARKER-8`($fp),$num
  147. nop
  148. nop ; alignment
  149. ___
  150. $code.=<<___ if ($BN_SZ==4);
  151. comiclr,<= 6,$num,%r0 ; are vectors long enough?
  152. b L\$abort
  153. ldi 0,%r28 ; signal "unhandled"
  154. add,ev %r0,$num,$num ; is $num even?
  155. b L\$abort
  156. nop
  157. or $ap,$np,$ti1
  158. extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
  159. b L\$abort
  160. nop
  161. nop ; alignment
  162. nop
  163. fldws 0($n0),${fn0}
  164. fldws,ma 4($bp),${fbi} ; bp[0]
  165. ___
  166. $code.=<<___ if ($BN_SZ==8);
  167. comib,> 3,$num,L\$abort ; are vectors long enough?
  168. ldi 0,%r28 ; signal "unhandled"
  169. addl $num,$num,$num ; I operate on 32-bit values
  170. fldws 4($n0),${fn0} ; only low part of n0
  171. fldws 4($bp),${fbi} ; bp[0] in flipped word order
  172. ___
  173. $code.=<<___;
  174. fldds 0($ap),${fai} ; ap[0,1]
  175. fldds 0($np),${fni} ; np[0,1]
  176. sh2addl $num,%r0,$arrsz
  177. ldi 31,$hi0
  178. ldo 36($arrsz),$hi1 ; space for tp[num+1]
  179. andcm $hi1,$hi0,$hi1 ; align
  180. addl $hi1,%sp,%sp
  181. $PUSH $fp,-$SIZE_T(%sp)
  182. ldo `$LOCALS+16`($fp),$xfer
  183. ldo `$LOCALS+32+4`($fp),$tp
  184. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
  185. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
  186. xmpyu ${fn0},${fab0}R,${fm0}
  187. addl $arrsz,$ap,$ap ; point at the end
  188. addl $arrsz,$np,$np
  189. subi 0,$arrsz,$idx ; j=0
  190. ldo 8($idx),$idx ; j++++
  191. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  192. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  193. fstds ${fab0},-16($xfer)
  194. fstds ${fnm0},-8($xfer)
  195. fstds ${fab1},0($xfer)
  196. fstds ${fnm1},8($xfer)
  197. flddx $idx($ap),${fai} ; ap[2,3]
  198. flddx $idx($np),${fni} ; np[2,3]
  199. ___
  200. $code.=<<___ if ($BN_SZ==4);
  201. mtctl $hi0,%cr11 ; $hi0 still holds 31
  202. extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
  203. b L\$parisc11
  204. nop
  205. ___
  206. $code.=<<___; # PA-RISC 2.0 code-path
  207. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  208. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  209. ldd -16($xfer),$ab0
  210. fstds ${fab0},-16($xfer)
  211. extrd,u $ab0,31,32,$hi0
  212. extrd,u $ab0,63,32,$ab0
  213. ldd -8($xfer),$nm0
  214. fstds ${fnm0},-8($xfer)
  215. ldo 8($idx),$idx ; j++++
  216. addl $ab0,$nm0,$nm0 ; low part is discarded
  217. extrd,u $nm0,31,32,$hi1
  218. L\$1st
  219. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
  220. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  221. ldd 0($xfer),$ab1
  222. fstds ${fab1},0($xfer)
  223. addl $hi0,$ab1,$ab1
  224. extrd,u $ab1,31,32,$hi0
  225. ldd 8($xfer),$nm1
  226. fstds ${fnm1},8($xfer)
  227. extrd,u $ab1,63,32,$ab1
  228. addl $hi1,$nm1,$nm1
  229. flddx $idx($ap),${fai} ; ap[j,j+1]
  230. flddx $idx($np),${fni} ; np[j,j+1]
  231. addl $ab1,$nm1,$nm1
  232. extrd,u $nm1,31,32,$hi1
  233. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  234. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  235. ldd -16($xfer),$ab0
  236. fstds ${fab0},-16($xfer)
  237. addl $hi0,$ab0,$ab0
  238. extrd,u $ab0,31,32,$hi0
  239. ldd -8($xfer),$nm0
  240. fstds ${fnm0},-8($xfer)
  241. extrd,u $ab0,63,32,$ab0
  242. addl $hi1,$nm0,$nm0
  243. stw $nm1,-4($tp) ; tp[j-1]
  244. addl $ab0,$nm0,$nm0
  245. stw,ma $nm0,8($tp) ; tp[j-1]
  246. addib,<> 8,$idx,L\$1st ; j++++
  247. extrd,u $nm0,31,32,$hi1
  248. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
  249. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  250. ldd 0($xfer),$ab1
  251. fstds ${fab1},0($xfer)
  252. addl $hi0,$ab1,$ab1
  253. extrd,u $ab1,31,32,$hi0
  254. ldd 8($xfer),$nm1
  255. fstds ${fnm1},8($xfer)
  256. extrd,u $ab1,63,32,$ab1
  257. addl $hi1,$nm1,$nm1
  258. ldd -16($xfer),$ab0
  259. addl $ab1,$nm1,$nm1
  260. ldd -8($xfer),$nm0
  261. extrd,u $nm1,31,32,$hi1
  262. addl $hi0,$ab0,$ab0
  263. extrd,u $ab0,31,32,$hi0
  264. stw $nm1,-4($tp) ; tp[j-1]
  265. extrd,u $ab0,63,32,$ab0
  266. addl $hi1,$nm0,$nm0
  267. ldd 0($xfer),$ab1
  268. addl $ab0,$nm0,$nm0
  269. ldd,mb 8($xfer),$nm1
  270. extrd,u $nm0,31,32,$hi1
  271. stw,ma $nm0,8($tp) ; tp[j-1]
  272. ldo -1($num),$num ; i--
  273. subi 0,$arrsz,$idx ; j=0
  274. ___
  275. $code.=<<___ if ($BN_SZ==4);
  276. fldws,ma 4($bp),${fbi} ; bp[1]
  277. ___
  278. $code.=<<___ if ($BN_SZ==8);
  279. fldws 0($bp),${fbi} ; bp[1] in flipped word order
  280. ___
  281. $code.=<<___;
  282. flddx $idx($ap),${fai} ; ap[0,1]
  283. flddx $idx($np),${fni} ; np[0,1]
  284. fldws 8($xfer),${fti}R ; tp[0]
  285. addl $hi0,$ab1,$ab1
  286. extrd,u $ab1,31,32,$hi0
  287. extrd,u $ab1,63,32,$ab1
  288. ldo 8($idx),$idx ; j++++
  289. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
  290. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
  291. addl $hi1,$nm1,$nm1
  292. addl $ab1,$nm1,$nm1
  293. extrd,u $nm1,31,32,$hi1
  294. fstws,mb ${fab0}L,-8($xfer) ; save high part
  295. stw $nm1,-4($tp) ; tp[j-1]
  296. fcpy,sgl %fr0,${fti}L ; zero high part
  297. fcpy,sgl %fr0,${fab0}L
  298. addl $hi1,$hi0,$hi0
  299. extrd,u $hi0,31,32,$hi1
  300. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  301. fcnvxf,dbl,dbl ${fab0},${fab0}
  302. stw $hi0,0($tp)
  303. stw $hi1,4($tp)
  304. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  305. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  306. xmpyu ${fn0},${fab0}R,${fm0}
  307. ldo `$LOCALS+32+4`($fp),$tp
  308. L\$outer
  309. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  310. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  311. fstds ${fab0},-16($xfer) ; 33-bit value
  312. fstds ${fnm0},-8($xfer)
  313. flddx $idx($ap),${fai} ; ap[2]
  314. flddx $idx($np),${fni} ; np[2]
  315. ldo 8($idx),$idx ; j++++
  316. ldd -16($xfer),$ab0 ; 33-bit value
  317. ldd -8($xfer),$nm0
  318. ldw 0($xfer),$hi0 ; high part
  319. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  320. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  321. extrd,u $ab0,31,32,$ti0 ; carry bit
  322. extrd,u $ab0,63,32,$ab0
  323. fstds ${fab1},0($xfer)
  324. addl $ti0,$hi0,$hi0 ; account carry bit
  325. fstds ${fnm1},8($xfer)
  326. addl $ab0,$nm0,$nm0 ; low part is discarded
  327. ldw 0($tp),$ti1 ; tp[1]
  328. extrd,u $nm0,31,32,$hi1
  329. fstds ${fab0},-16($xfer)
  330. fstds ${fnm0},-8($xfer)
  331. L\$inner
  332. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
  333. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  334. ldd 0($xfer),$ab1
  335. fstds ${fab1},0($xfer)
  336. addl $hi0,$ti1,$ti1
  337. addl $ti1,$ab1,$ab1
  338. ldd 8($xfer),$nm1
  339. fstds ${fnm1},8($xfer)
  340. extrd,u $ab1,31,32,$hi0
  341. extrd,u $ab1,63,32,$ab1
  342. flddx $idx($ap),${fai} ; ap[j,j+1]
  343. flddx $idx($np),${fni} ; np[j,j+1]
  344. addl $hi1,$nm1,$nm1
  345. addl $ab1,$nm1,$nm1
  346. ldw 4($tp),$ti0 ; tp[j]
  347. stw $nm1,-4($tp) ; tp[j-1]
  348. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  349. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  350. ldd -16($xfer),$ab0
  351. fstds ${fab0},-16($xfer)
  352. addl $hi0,$ti0,$ti0
  353. addl $ti0,$ab0,$ab0
  354. ldd -8($xfer),$nm0
  355. fstds ${fnm0},-8($xfer)
  356. extrd,u $ab0,31,32,$hi0
  357. extrd,u $nm1,31,32,$hi1
  358. ldw 8($tp),$ti1 ; tp[j]
  359. extrd,u $ab0,63,32,$ab0
  360. addl $hi1,$nm0,$nm0
  361. addl $ab0,$nm0,$nm0
  362. stw,ma $nm0,8($tp) ; tp[j-1]
  363. addib,<> 8,$idx,L\$inner ; j++++
  364. extrd,u $nm0,31,32,$hi1
  365. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
  366. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  367. ldd 0($xfer),$ab1
  368. fstds ${fab1},0($xfer)
  369. addl $hi0,$ti1,$ti1
  370. addl $ti1,$ab1,$ab1
  371. ldd 8($xfer),$nm1
  372. fstds ${fnm1},8($xfer)
  373. extrd,u $ab1,31,32,$hi0
  374. extrd,u $ab1,63,32,$ab1
  375. ldw 4($tp),$ti0 ; tp[j]
  376. addl $hi1,$nm1,$nm1
  377. addl $ab1,$nm1,$nm1
  378. ldd -16($xfer),$ab0
  379. ldd -8($xfer),$nm0
  380. extrd,u $nm1,31,32,$hi1
  381. addl $hi0,$ab0,$ab0
  382. addl $ti0,$ab0,$ab0
  383. stw $nm1,-4($tp) ; tp[j-1]
  384. extrd,u $ab0,31,32,$hi0
  385. ldw 8($tp),$ti1 ; tp[j]
  386. extrd,u $ab0,63,32,$ab0
  387. addl $hi1,$nm0,$nm0
  388. ldd 0($xfer),$ab1
  389. addl $ab0,$nm0,$nm0
  390. ldd,mb 8($xfer),$nm1
  391. extrd,u $nm0,31,32,$hi1
  392. stw,ma $nm0,8($tp) ; tp[j-1]
  393. addib,= -1,$num,L\$outerdone ; i--
  394. subi 0,$arrsz,$idx ; j=0
  395. ___
  396. $code.=<<___ if ($BN_SZ==4);
  397. fldws,ma 4($bp),${fbi} ; bp[i]
  398. ___
  399. $code.=<<___ if ($BN_SZ==8);
  400. ldi 12,$ti0 ; bp[i] in flipped word order
  401. addl,ev %r0,$num,$num
  402. ldi -4,$ti0
  403. addl $ti0,$bp,$bp
  404. fldws 0($bp),${fbi}
  405. ___
  406. $code.=<<___;
  407. flddx $idx($ap),${fai} ; ap[0]
  408. addl $hi0,$ab1,$ab1
  409. flddx $idx($np),${fni} ; np[0]
  410. fldws 8($xfer),${fti}R ; tp[0]
  411. addl $ti1,$ab1,$ab1
  412. extrd,u $ab1,31,32,$hi0
  413. extrd,u $ab1,63,32,$ab1
  414. ldo 8($idx),$idx ; j++++
  415. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
  416. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
  417. ldw 4($tp),$ti0 ; tp[j]
  418. addl $hi1,$nm1,$nm1
  419. fstws,mb ${fab0}L,-8($xfer) ; save high part
  420. addl $ab1,$nm1,$nm1
  421. extrd,u $nm1,31,32,$hi1
  422. fcpy,sgl %fr0,${fti}L ; zero high part
  423. fcpy,sgl %fr0,${fab0}L
  424. stw $nm1,-4($tp) ; tp[j-1]
  425. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  426. fcnvxf,dbl,dbl ${fab0},${fab0}
  427. addl $hi1,$hi0,$hi0
  428. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  429. addl $ti0,$hi0,$hi0
  430. extrd,u $hi0,31,32,$hi1
  431. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  432. stw $hi0,0($tp)
  433. stw $hi1,4($tp)
  434. xmpyu ${fn0},${fab0}R,${fm0}
  435. b L\$outer
  436. ldo `$LOCALS+32+4`($fp),$tp
  437. L\$outerdone
  438. addl $hi0,$ab1,$ab1
  439. addl $ti1,$ab1,$ab1
  440. extrd,u $ab1,31,32,$hi0
  441. extrd,u $ab1,63,32,$ab1
  442. ldw 4($tp),$ti0 ; tp[j]
  443. addl $hi1,$nm1,$nm1
  444. addl $ab1,$nm1,$nm1
  445. extrd,u $nm1,31,32,$hi1
  446. stw $nm1,-4($tp) ; tp[j-1]
  447. addl $hi1,$hi0,$hi0
  448. addl $ti0,$hi0,$hi0
  449. extrd,u $hi0,31,32,$hi1
  450. stw $hi0,0($tp)
  451. stw $hi1,4($tp)
  452. ldo `$LOCALS+32`($fp),$tp
  453. sub %r0,%r0,%r0 ; clear borrow
  454. ___
  455. $code.=<<___ if ($BN_SZ==4);
  456. ldws,ma 4($tp),$ti0
  457. extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
  458. b L\$sub_pa11
  459. addl $tp,$arrsz,$tp
  460. L\$sub
  461. ldwx $idx($np),$hi0
  462. subb $ti0,$hi0,$hi1
  463. ldwx $idx($tp),$ti0
  464. addib,<> 4,$idx,L\$sub
  465. stws,ma $hi1,4($rp)
  466. subb $ti0,%r0,$hi1
  467. ldo -4($tp),$tp
  468. ___
  469. $code.=<<___ if ($BN_SZ==8);
  470. ldd,ma 8($tp),$ti0
  471. L\$sub
  472. ldd $idx($np),$hi0
  473. shrpd $ti0,$ti0,32,$ti0 ; flip word order
  474. std $ti0,-8($tp) ; save flipped value
  475. sub,db $ti0,$hi0,$hi1
  476. ldd,ma 8($tp),$ti0
  477. addib,<> 8,$idx,L\$sub
  478. std,ma $hi1,8($rp)
  479. extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
  480. sub,db $ti0,%r0,$hi1
  481. ldo -8($tp),$tp
  482. ___
  483. $code.=<<___;
  484. and $tp,$hi1,$ap
  485. andcm $rp,$hi1,$bp
  486. or $ap,$bp,$np
  487. sub $rp,$arrsz,$rp ; rewind rp
  488. subi 0,$arrsz,$idx
  489. ldo `$LOCALS+32`($fp),$tp
  490. L\$copy
  491. ldd $idx($np),$hi0
  492. std,ma %r0,8($tp)
  493. addib,<> 8,$idx,.-8 ; L\$copy
  494. std,ma $hi0,8($rp)
  495. ___
  496. if ($BN_SZ==4) { # PA-RISC 1.1 code-path
  497. $ablo=$ab0;
  498. $abhi=$ab1;
  499. $nmlo0=$nm0;
  500. $nmhi0=$nm1;
  501. $nmlo1="%r9";
  502. $nmhi1="%r8";
  503. $code.=<<___;
  504. b L\$done
  505. nop
  506. .ALIGN 8
  507. L\$parisc11
  508. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  509. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  510. ldw -12($xfer),$ablo
  511. ldw -16($xfer),$hi0
  512. ldw -4($xfer),$nmlo0
  513. ldw -8($xfer),$nmhi0
  514. fstds ${fab0},-16($xfer)
  515. fstds ${fnm0},-8($xfer)
  516. ldo 8($idx),$idx ; j++++
  517. add $ablo,$nmlo0,$nmlo0 ; discarded
  518. addc %r0,$nmhi0,$hi1
  519. ldw 4($xfer),$ablo
  520. ldw 0($xfer),$abhi
  521. nop
  522. L\$1st_pa11
  523. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
  524. flddx $idx($ap),${fai} ; ap[j,j+1]
  525. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  526. flddx $idx($np),${fni} ; np[j,j+1]
  527. add $hi0,$ablo,$ablo
  528. ldw 12($xfer),$nmlo1
  529. addc %r0,$abhi,$hi0
  530. ldw 8($xfer),$nmhi1
  531. add $ablo,$nmlo1,$nmlo1
  532. fstds ${fab1},0($xfer)
  533. addc %r0,$nmhi1,$nmhi1
  534. fstds ${fnm1},8($xfer)
  535. add $hi1,$nmlo1,$nmlo1
  536. ldw -12($xfer),$ablo
  537. addc %r0,$nmhi1,$hi1
  538. ldw -16($xfer),$abhi
  539. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  540. ldw -4($xfer),$nmlo0
  541. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  542. ldw -8($xfer),$nmhi0
  543. add $hi0,$ablo,$ablo
  544. stw $nmlo1,-4($tp) ; tp[j-1]
  545. addc %r0,$abhi,$hi0
  546. fstds ${fab0},-16($xfer)
  547. add $ablo,$nmlo0,$nmlo0
  548. fstds ${fnm0},-8($xfer)
  549. addc %r0,$nmhi0,$nmhi0
  550. ldw 0($xfer),$abhi
  551. add $hi1,$nmlo0,$nmlo0
  552. ldw 4($xfer),$ablo
  553. stws,ma $nmlo0,8($tp) ; tp[j-1]
  554. addib,<> 8,$idx,L\$1st_pa11 ; j++++
  555. addc %r0,$nmhi0,$hi1
  556. ldw 8($xfer),$nmhi1
  557. ldw 12($xfer),$nmlo1
  558. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
  559. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  560. add $hi0,$ablo,$ablo
  561. fstds ${fab1},0($xfer)
  562. addc %r0,$abhi,$hi0
  563. fstds ${fnm1},8($xfer)
  564. add $ablo,$nmlo1,$nmlo1
  565. ldw -16($xfer),$abhi
  566. addc %r0,$nmhi1,$nmhi1
  567. ldw -12($xfer),$ablo
  568. add $hi1,$nmlo1,$nmlo1
  569. ldw -8($xfer),$nmhi0
  570. addc %r0,$nmhi1,$hi1
  571. ldw -4($xfer),$nmlo0
  572. add $hi0,$ablo,$ablo
  573. stw $nmlo1,-4($tp) ; tp[j-1]
  574. addc %r0,$abhi,$hi0
  575. ldw 0($xfer),$abhi
  576. add $ablo,$nmlo0,$nmlo0
  577. ldw 4($xfer),$ablo
  578. addc %r0,$nmhi0,$nmhi0
  579. ldws,mb 8($xfer),$nmhi1
  580. add $hi1,$nmlo0,$nmlo0
  581. ldw 4($xfer),$nmlo1
  582. addc %r0,$nmhi0,$hi1
  583. stws,ma $nmlo0,8($tp) ; tp[j-1]
  584. ldo -1($num),$num ; i--
  585. subi 0,$arrsz,$idx ; j=0
  586. fldws,ma 4($bp),${fbi} ; bp[1]
  587. flddx $idx($ap),${fai} ; ap[0,1]
  588. flddx $idx($np),${fni} ; np[0,1]
  589. fldws 8($xfer),${fti}R ; tp[0]
  590. add $hi0,$ablo,$ablo
  591. addc %r0,$abhi,$hi0
  592. ldo 8($idx),$idx ; j++++
  593. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
  594. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
  595. add $hi1,$nmlo1,$nmlo1
  596. addc %r0,$nmhi1,$nmhi1
  597. add $ablo,$nmlo1,$nmlo1
  598. addc %r0,$nmhi1,$hi1
  599. fstws,mb ${fab0}L,-8($xfer) ; save high part
  600. stw $nmlo1,-4($tp) ; tp[j-1]
  601. fcpy,sgl %fr0,${fti}L ; zero high part
  602. fcpy,sgl %fr0,${fab0}L
  603. add $hi1,$hi0,$hi0
  604. addc %r0,%r0,$hi1
  605. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  606. fcnvxf,dbl,dbl ${fab0},${fab0}
  607. stw $hi0,0($tp)
  608. stw $hi1,4($tp)
  609. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  610. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  611. xmpyu ${fn0},${fab0}R,${fm0}
  612. ldo `$LOCALS+32+4`($fp),$tp
  613. L\$outer_pa11
  614. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  615. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  616. fstds ${fab0},-16($xfer) ; 33-bit value
  617. fstds ${fnm0},-8($xfer)
  618. flddx $idx($ap),${fai} ; ap[2,3]
  619. flddx $idx($np),${fni} ; np[2,3]
  620. ldw -16($xfer),$abhi ; carry bit actually
  621. ldo 8($idx),$idx ; j++++
  622. ldw -12($xfer),$ablo
  623. ldw -8($xfer),$nmhi0
  624. ldw -4($xfer),$nmlo0
  625. ldw 0($xfer),$hi0 ; high part
  626. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  627. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  628. fstds ${fab1},0($xfer)
  629. addl $abhi,$hi0,$hi0 ; account carry bit
  630. fstds ${fnm1},8($xfer)
  631. add $ablo,$nmlo0,$nmlo0 ; discarded
  632. ldw 0($tp),$ti1 ; tp[1]
  633. addc %r0,$nmhi0,$hi1
  634. fstds ${fab0},-16($xfer)
  635. fstds ${fnm0},-8($xfer)
  636. ldw 4($xfer),$ablo
  637. ldw 0($xfer),$abhi
  638. L\$inner_pa11
  639. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
  640. flddx $idx($ap),${fai} ; ap[j,j+1]
  641. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  642. flddx $idx($np),${fni} ; np[j,j+1]
  643. add $hi0,$ablo,$ablo
  644. ldw 4($tp),$ti0 ; tp[j]
  645. addc %r0,$abhi,$abhi
  646. ldw 12($xfer),$nmlo1
  647. add $ti1,$ablo,$ablo
  648. ldw 8($xfer),$nmhi1
  649. addc %r0,$abhi,$hi0
  650. fstds ${fab1},0($xfer)
  651. add $ablo,$nmlo1,$nmlo1
  652. fstds ${fnm1},8($xfer)
  653. addc %r0,$nmhi1,$nmhi1
  654. ldw -12($xfer),$ablo
  655. add $hi1,$nmlo1,$nmlo1
  656. ldw -16($xfer),$abhi
  657. addc %r0,$nmhi1,$hi1
  658. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  659. ldw 8($tp),$ti1 ; tp[j]
  660. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  661. ldw -4($xfer),$nmlo0
  662. add $hi0,$ablo,$ablo
  663. ldw -8($xfer),$nmhi0
  664. addc %r0,$abhi,$abhi
  665. stw $nmlo1,-4($tp) ; tp[j-1]
  666. add $ti0,$ablo,$ablo
  667. fstds ${fab0},-16($xfer)
  668. addc %r0,$abhi,$hi0
  669. fstds ${fnm0},-8($xfer)
  670. add $ablo,$nmlo0,$nmlo0
  671. ldw 4($xfer),$ablo
  672. addc %r0,$nmhi0,$nmhi0
  673. ldw 0($xfer),$abhi
  674. add $hi1,$nmlo0,$nmlo0
  675. stws,ma $nmlo0,8($tp) ; tp[j-1]
  676. addib,<> 8,$idx,L\$inner_pa11 ; j++++
  677. addc %r0,$nmhi0,$hi1
  678. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
  679. ldw 12($xfer),$nmlo1
  680. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  681. ldw 8($xfer),$nmhi1
  682. add $hi0,$ablo,$ablo
  683. ldw 4($tp),$ti0 ; tp[j]
  684. addc %r0,$abhi,$abhi
  685. fstds ${fab1},0($xfer)
  686. add $ti1,$ablo,$ablo
  687. fstds ${fnm1},8($xfer)
  688. addc %r0,$abhi,$hi0
  689. ldw -16($xfer),$abhi
  690. add $ablo,$nmlo1,$nmlo1
  691. ldw -12($xfer),$ablo
  692. addc %r0,$nmhi1,$nmhi1
  693. ldw -8($xfer),$nmhi0
  694. add $hi1,$nmlo1,$nmlo1
  695. ldw -4($xfer),$nmlo0
  696. addc %r0,$nmhi1,$hi1
  697. add $hi0,$ablo,$ablo
  698. stw $nmlo1,-4($tp) ; tp[j-1]
  699. addc %r0,$abhi,$abhi
  700. add $ti0,$ablo,$ablo
  701. ldw 8($tp),$ti1 ; tp[j]
  702. addc %r0,$abhi,$hi0
  703. ldw 0($xfer),$abhi
  704. add $ablo,$nmlo0,$nmlo0
  705. ldw 4($xfer),$ablo
  706. addc %r0,$nmhi0,$nmhi0
  707. ldws,mb 8($xfer),$nmhi1
  708. add $hi1,$nmlo0,$nmlo0
  709. ldw 4($xfer),$nmlo1
  710. addc %r0,$nmhi0,$hi1
  711. stws,ma $nmlo0,8($tp) ; tp[j-1]
  712. addib,= -1,$num,L\$outerdone_pa11; i--
  713. subi 0,$arrsz,$idx ; j=0
  714. fldws,ma 4($bp),${fbi} ; bp[i]
  715. flddx $idx($ap),${fai} ; ap[0]
  716. add $hi0,$ablo,$ablo
  717. addc %r0,$abhi,$abhi
  718. flddx $idx($np),${fni} ; np[0]
  719. fldws 8($xfer),${fti}R ; tp[0]
  720. add $ti1,$ablo,$ablo
  721. addc %r0,$abhi,$hi0
  722. ldo 8($idx),$idx ; j++++
  723. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
  724. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
  725. ldw 4($tp),$ti0 ; tp[j]
  726. add $hi1,$nmlo1,$nmlo1
  727. addc %r0,$nmhi1,$nmhi1
  728. fstws,mb ${fab0}L,-8($xfer) ; save high part
  729. add $ablo,$nmlo1,$nmlo1
  730. addc %r0,$nmhi1,$hi1
  731. fcpy,sgl %fr0,${fti}L ; zero high part
  732. fcpy,sgl %fr0,${fab0}L
  733. stw $nmlo1,-4($tp) ; tp[j-1]
  734. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  735. fcnvxf,dbl,dbl ${fab0},${fab0}
  736. add $hi1,$hi0,$hi0
  737. addc %r0,%r0,$hi1
  738. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  739. add $ti0,$hi0,$hi0
  740. addc %r0,$hi1,$hi1
  741. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  742. stw $hi0,0($tp)
  743. stw $hi1,4($tp)
  744. xmpyu ${fn0},${fab0}R,${fm0}
  745. b L\$outer_pa11
  746. ldo `$LOCALS+32+4`($fp),$tp
  747. L\$outerdone_pa11
  748. add $hi0,$ablo,$ablo
  749. addc %r0,$abhi,$abhi
  750. add $ti1,$ablo,$ablo
  751. addc %r0,$abhi,$hi0
  752. ldw 4($tp),$ti0 ; tp[j]
  753. add $hi1,$nmlo1,$nmlo1
  754. addc %r0,$nmhi1,$nmhi1
  755. add $ablo,$nmlo1,$nmlo1
  756. addc %r0,$nmhi1,$hi1
  757. stw $nmlo1,-4($tp) ; tp[j-1]
  758. add $hi1,$hi0,$hi0
  759. addc %r0,%r0,$hi1
  760. add $ti0,$hi0,$hi0
  761. addc %r0,$hi1,$hi1
  762. stw $hi0,0($tp)
  763. stw $hi1,4($tp)
  764. ldo `$LOCALS+32+4`($fp),$tp
  765. sub %r0,%r0,%r0 ; clear borrow
  766. ldw -4($tp),$ti0
  767. addl $tp,$arrsz,$tp
  768. L\$sub_pa11
  769. ldwx $idx($np),$hi0
  770. subb $ti0,$hi0,$hi1
  771. ldwx $idx($tp),$ti0
  772. addib,<> 4,$idx,L\$sub_pa11
  773. stws,ma $hi1,4($rp)
  774. subb $ti0,%r0,$hi1
  775. ldo -4($tp),$tp
  776. and $tp,$hi1,$ap
  777. andcm $rp,$hi1,$bp
  778. or $ap,$bp,$np
  779. sub $rp,$arrsz,$rp ; rewind rp
  780. subi 0,$arrsz,$idx
  781. ldo `$LOCALS+32`($fp),$tp
  782. L\$copy_pa11
  783. ldwx $idx($np),$hi0
  784. stws,ma %r0,4($tp)
  785. addib,<> 4,$idx,L\$copy_pa11
  786. stws,ma $hi0,4($rp)
  787. nop ; alignment
  788. L\$done
  789. ___
  790. }
  791. $code.=<<___;
  792. ldi 1,%r28 ; signal "handled"
  793. ldo $FRAME($fp),%sp ; destroy tp[num+1]
  794. $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
  795. $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
  796. $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
  797. $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
  798. $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
  799. $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
  800. $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
  801. $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
  802. L\$abort
  803. bv (%r2)
  804. .EXIT
  805. $POPMB -$FRAME(%sp),%r3
  806. .PROCEND
  807. .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
  808. ___
  809. # Explicitly encode PA-RISC 2.0 instructions used in this module, so
  810. # that it can be compiled with .LEVEL 1.0. It should be noted that I
  811. # wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
  812. # directive...
  813. my $ldd = sub {
  814. my ($mod,$args) = @_;
  815. my $orig = "ldd$mod\t$args";
  816. if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
  817. { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
  818. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  819. }
  820. elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
  821. { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
  822. $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
  823. $opcode|=(1<<5) if ($mod =~ /^,m/);
  824. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  825. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  826. }
  827. else { "\t".$orig; }
  828. };
  829. my $std = sub {
  830. my ($mod,$args) = @_;
  831. my $orig = "std$mod\t$args";
  832. if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
  833. { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
  834. $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
  835. $opcode|=(1<<5) if ($mod =~ /^,m/);
  836. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  837. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  838. }
  839. else { "\t".$orig; }
  840. };
  841. my $extrd = sub {
  842. my ($mod,$args) = @_;
  843. my $orig = "extrd$mod\t$args";
  844. # I only have ",u" completer, it's implicitly encoded...
  845. if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
  846. { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
  847. my $len=32-$3;
  848. $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
  849. $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
  850. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  851. }
  852. elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
  853. { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
  854. my $len=32-$2;
  855. $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
  856. $opcode |= (1<<13) if ($mod =~ /,\**=/);
  857. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  858. }
  859. else { "\t".$orig; }
  860. };
  861. my $shrpd = sub {
  862. my ($mod,$args) = @_;
  863. my $orig = "shrpd$mod\t$args";
  864. if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
  865. { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
  866. my $cpos=63-$3;
  867. $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
  868. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  869. }
  870. else { "\t".$orig; }
  871. };
  872. my $sub = sub {
  873. my ($mod,$args) = @_;
  874. my $orig = "sub$mod\t$args";
  875. if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
  876. my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
  877. $opcode|=(1<<10); # e1
  878. $opcode|=(1<<8); # e2
  879. $opcode|=(1<<5); # d
  880. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
  881. }
  882. else { "\t".$orig; }
  883. };
  884. sub assemble {
  885. my ($mnemonic,$mod,$args)=@_;
  886. my $opcode = eval("\$$mnemonic");
  887. ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
  888. }
  889. foreach (split("\n",$code)) {
  890. s/\`([^\`]*)\`/eval $1/ge;
  891. # flip word order in 64-bit mode...
  892. s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
  893. # assemble 2.0 instructions in 32-bit mode...
  894. s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
  895. print $_,"\n";
  896. }
  897. close STDOUT;