#! /usr/bin/env perl
# Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# On PA-7100LC this module performs ~90-50% better, less for longer
# keys, than code generated by gcc 3.2 for PA-RISC 1.1. Latter means
# that compiler utilized xmpyu instruction to perform 32x32=64-bit
# multiplication, which in turn means that "baseline" performance was
# optimal in respect to instruction set capabilities. Fair comparison
# with vendor compiler is problematic, because OpenSSL doesn't define
# BN_LLONG [presumably] for historical reasons, which drives compiler
# toward 4 times 16x16=32-bit multiplications [plus complementary
# shifts and additions] instead. This means that you should observe
# several times improvement over code generated by vendor compiler
# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
# improvement coefficient was never collected on PA-7100LC, or any
# other 1.1 CPU, because I don't have access to such machine with
# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
# of ~5x on PA-8600.
#
# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
# reportedly ~2x faster than vendor compiler generated code [according
# to comment in pa-risc2[W].s]. Here comes a catch. Execution core of
# this implementation is actually 32-bit one, in the sense that it
# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
# 64-bit BN_LONGs... How do they interoperate then? No problem. This
# module picks halves of 64-bit values in reverse order and pretends
# they were 32-bit BN_LONGs. But can 32-bit core compete with "pure"
# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
# i.e. there is no "wider" multiplication like on most other 64-bit
# platforms. This means that even being effectively 32-bit, this
# implementation performs "64-bit" computational task in same amount
# of arithmetic operations, most notably multiplications. It requires
# more memory references, most notably to tp[num], but this doesn't
# seem to exhaust memory port capacity. And indeed, dedicated PA-RISC
# 2.0 code path provides virtually same performance as pa-risc2[W].s:
# it's ~10% better for shortest key length and ~10% worse for longest
# one.
#
# In case it wasn't clear. The module has two distinct code paths:
# PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit
# additions and 64-bit integer loads, not to mention specific
# instruction scheduling. In 64-bit build naturally only 2.0 code path
# is assembled. In 32-bit application context both code paths are
# assembled, PA-RISC 2.0 CPU is detected at run-time and proper path
# is taken automatically. Also, in 32-bit build the module imposes
# couple of limitations: vector lengths has to be even and vector
# addresses has to be 64-bit aligned. Normally neither is a problem:
# most common key lengths are even and vectors are commonly malloc-ed,
# which ensures alignment.
#
# Special thanks to polarhome.com for providing HP-UX account on
# PA-RISC 1.1 machine, and to correspondent who chose to remain
# anonymous for testing the code on PA-RISC 2.0 machine.
  66. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  67. # $output is the last argument if it looks like a file (it has an extension)
  68. # $flavour is the first argument if it doesn't look like a file
  69. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  70. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
  71. $output and open STDOUT,">$output";
  72. if ($flavour =~ /64/) {
  73. $LEVEL ="2.0W";
  74. $SIZE_T =8;
  75. $FRAME_MARKER =80;
  76. $SAVED_RP =16;
  77. $PUSH ="std";
  78. $PUSHMA ="std,ma";
  79. $POP ="ldd";
  80. $POPMB ="ldd,mb";
  81. $BN_SZ =$SIZE_T;
  82. } else {
  83. $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
  84. $SIZE_T =4;
  85. $FRAME_MARKER =48;
  86. $SAVED_RP =20;
  87. $PUSH ="stw";
  88. $PUSHMA ="stwm";
  89. $POP ="ldw";
  90. $POPMB ="ldwm";
  91. $BN_SZ =$SIZE_T;
  92. if (open CONF,"<${dir}../../opensslconf.h") {
  93. while(<CONF>) {
  94. if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
  95. $BN_SZ=8;
  96. $LEVEL="2.0";
  97. last;
  98. }
  99. }
  100. close CONF;
  101. }
  102. }
  103. $FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
  104. # [+ argument transfer]
  105. $LOCALS=$FRAME-$FRAME_MARKER;
  106. $FRAME+=32; # local variables
  107. $tp="%r31";
  108. $ti1="%r29";
  109. $ti0="%r28";
  110. $rp="%r26";
  111. $ap="%r25";
  112. $bp="%r24";
  113. $np="%r23";
  114. $n0="%r22"; # passed through stack in 32-bit
  115. $num="%r21"; # passed through stack in 32-bit
  116. $idx="%r20";
  117. $arrsz="%r19";
  118. $nm1="%r7";
  119. $nm0="%r6";
  120. $ab1="%r5";
  121. $ab0="%r4";
  122. $fp="%r3";
  123. $hi1="%r2";
  124. $hi0="%r1";
  125. $xfer=$n0; # accommodates [-16..15] offset in fld[dw]s
  126. $fm0="%fr4"; $fti=$fm0;
  127. $fbi="%fr5L";
  128. $fn0="%fr5R";
  129. $fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
  130. $fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
  131. $code=<<___;
  132. .LEVEL $LEVEL
  133. .SPACE \$TEXT\$
  134. .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
  135. .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
  136. .ALIGN 64
  137. bn_mul_mont
  138. .PROC
  139. .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
  140. .ENTRY
  141. $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
  142. $PUSHMA %r3,$FRAME(%sp)
  143. $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
  144. $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
  145. $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
  146. $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
  147. $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
  148. $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
  149. $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
  150. ldo -$FRAME(%sp),$fp
  151. ___
  152. $code.=<<___ if ($SIZE_T==4);
  153. ldw `-$FRAME_MARKER-4`($fp),$n0
  154. ldw `-$FRAME_MARKER-8`($fp),$num
  155. nop
  156. nop ; alignment
  157. ___
  158. $code.=<<___ if ($BN_SZ==4);
  159. comiclr,<= 6,$num,%r0 ; are vectors long enough?
  160. b L\$abort
  161. ldi 0,%r28 ; signal "unhandled"
  162. add,ev %r0,$num,$num ; is $num even?
  163. b L\$abort
  164. nop
  165. or $ap,$np,$ti1
  166. extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
  167. b L\$abort
  168. nop
  169. nop ; alignment
  170. nop
  171. fldws 0($n0),${fn0}
  172. fldws,ma 4($bp),${fbi} ; bp[0]
  173. ___
  174. $code.=<<___ if ($BN_SZ==8);
  175. comib,> 3,$num,L\$abort ; are vectors long enough?
  176. ldi 0,%r28 ; signal "unhandled"
  177. addl $num,$num,$num ; I operate on 32-bit values
  178. fldws 4($n0),${fn0} ; only low part of n0
  179. fldws 4($bp),${fbi} ; bp[0] in flipped word order
  180. ___
  181. $code.=<<___;
  182. fldds 0($ap),${fai} ; ap[0,1]
  183. fldds 0($np),${fni} ; np[0,1]
  184. sh2addl $num,%r0,$arrsz
  185. ldi 31,$hi0
  186. ldo 36($arrsz),$hi1 ; space for tp[num+1]
  187. andcm $hi1,$hi0,$hi1 ; align
  188. addl $hi1,%sp,%sp
  189. $PUSH $fp,-$SIZE_T(%sp)
  190. ldo `$LOCALS+16`($fp),$xfer
  191. ldo `$LOCALS+32+4`($fp),$tp
  192. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
  193. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
  194. xmpyu ${fn0},${fab0}R,${fm0}
  195. addl $arrsz,$ap,$ap ; point at the end
  196. addl $arrsz,$np,$np
  197. subi 0,$arrsz,$idx ; j=0
  198. ldo 8($idx),$idx ; j++++
  199. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  200. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  201. fstds ${fab0},-16($xfer)
  202. fstds ${fnm0},-8($xfer)
  203. fstds ${fab1},0($xfer)
  204. fstds ${fnm1},8($xfer)
  205. flddx $idx($ap),${fai} ; ap[2,3]
  206. flddx $idx($np),${fni} ; np[2,3]
  207. ___
  208. $code.=<<___ if ($BN_SZ==4);
  209. mtctl $hi0,%cr11 ; $hi0 still holds 31
  210. extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
  211. b L\$parisc11
  212. nop
  213. ___
  214. $code.=<<___; # PA-RISC 2.0 code-path
  215. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  216. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  217. ldd -16($xfer),$ab0
  218. fstds ${fab0},-16($xfer)
  219. extrd,u $ab0,31,32,$hi0
  220. extrd,u $ab0,63,32,$ab0
  221. ldd -8($xfer),$nm0
  222. fstds ${fnm0},-8($xfer)
  223. ldo 8($idx),$idx ; j++++
  224. addl $ab0,$nm0,$nm0 ; low part is discarded
  225. extrd,u $nm0,31,32,$hi1
  226. L\$1st
  227. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
  228. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  229. ldd 0($xfer),$ab1
  230. fstds ${fab1},0($xfer)
  231. addl $hi0,$ab1,$ab1
  232. extrd,u $ab1,31,32,$hi0
  233. ldd 8($xfer),$nm1
  234. fstds ${fnm1},8($xfer)
  235. extrd,u $ab1,63,32,$ab1
  236. addl $hi1,$nm1,$nm1
  237. flddx $idx($ap),${fai} ; ap[j,j+1]
  238. flddx $idx($np),${fni} ; np[j,j+1]
  239. addl $ab1,$nm1,$nm1
  240. extrd,u $nm1,31,32,$hi1
  241. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  242. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  243. ldd -16($xfer),$ab0
  244. fstds ${fab0},-16($xfer)
  245. addl $hi0,$ab0,$ab0
  246. extrd,u $ab0,31,32,$hi0
  247. ldd -8($xfer),$nm0
  248. fstds ${fnm0},-8($xfer)
  249. extrd,u $ab0,63,32,$ab0
  250. addl $hi1,$nm0,$nm0
  251. stw $nm1,-4($tp) ; tp[j-1]
  252. addl $ab0,$nm0,$nm0
  253. stw,ma $nm0,8($tp) ; tp[j-1]
  254. addib,<> 8,$idx,L\$1st ; j++++
  255. extrd,u $nm0,31,32,$hi1
  256. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
  257. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  258. ldd 0($xfer),$ab1
  259. fstds ${fab1},0($xfer)
  260. addl $hi0,$ab1,$ab1
  261. extrd,u $ab1,31,32,$hi0
  262. ldd 8($xfer),$nm1
  263. fstds ${fnm1},8($xfer)
  264. extrd,u $ab1,63,32,$ab1
  265. addl $hi1,$nm1,$nm1
  266. ldd -16($xfer),$ab0
  267. addl $ab1,$nm1,$nm1
  268. ldd -8($xfer),$nm0
  269. extrd,u $nm1,31,32,$hi1
  270. addl $hi0,$ab0,$ab0
  271. extrd,u $ab0,31,32,$hi0
  272. stw $nm1,-4($tp) ; tp[j-1]
  273. extrd,u $ab0,63,32,$ab0
  274. addl $hi1,$nm0,$nm0
  275. ldd 0($xfer),$ab1
  276. addl $ab0,$nm0,$nm0
  277. ldd,mb 8($xfer),$nm1
  278. extrd,u $nm0,31,32,$hi1
  279. stw,ma $nm0,8($tp) ; tp[j-1]
  280. ldo -1($num),$num ; i--
  281. subi 0,$arrsz,$idx ; j=0
  282. ___
  283. $code.=<<___ if ($BN_SZ==4);
  284. fldws,ma 4($bp),${fbi} ; bp[1]
  285. ___
  286. $code.=<<___ if ($BN_SZ==8);
  287. fldws 0($bp),${fbi} ; bp[1] in flipped word order
  288. ___
  289. $code.=<<___;
  290. flddx $idx($ap),${fai} ; ap[0,1]
  291. flddx $idx($np),${fni} ; np[0,1]
  292. fldws 8($xfer),${fti}R ; tp[0]
  293. addl $hi0,$ab1,$ab1
  294. extrd,u $ab1,31,32,$hi0
  295. extrd,u $ab1,63,32,$ab1
  296. ldo 8($idx),$idx ; j++++
  297. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
  298. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
  299. addl $hi1,$nm1,$nm1
  300. addl $ab1,$nm1,$nm1
  301. extrd,u $nm1,31,32,$hi1
  302. fstws,mb ${fab0}L,-8($xfer) ; save high part
  303. stw $nm1,-4($tp) ; tp[j-1]
  304. fcpy,sgl %fr0,${fti}L ; zero high part
  305. fcpy,sgl %fr0,${fab0}L
  306. addl $hi1,$hi0,$hi0
  307. extrd,u $hi0,31,32,$hi1
  308. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  309. fcnvxf,dbl,dbl ${fab0},${fab0}
  310. stw $hi0,0($tp)
  311. stw $hi1,4($tp)
  312. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  313. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  314. xmpyu ${fn0},${fab0}R,${fm0}
  315. ldo `$LOCALS+32+4`($fp),$tp
  316. L\$outer
  317. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  318. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  319. fstds ${fab0},-16($xfer) ; 33-bit value
  320. fstds ${fnm0},-8($xfer)
  321. flddx $idx($ap),${fai} ; ap[2]
  322. flddx $idx($np),${fni} ; np[2]
  323. ldo 8($idx),$idx ; j++++
  324. ldd -16($xfer),$ab0 ; 33-bit value
  325. ldd -8($xfer),$nm0
  326. ldw 0($xfer),$hi0 ; high part
  327. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  328. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  329. extrd,u $ab0,31,32,$ti0 ; carry bit
  330. extrd,u $ab0,63,32,$ab0
  331. fstds ${fab1},0($xfer)
  332. addl $ti0,$hi0,$hi0 ; account carry bit
  333. fstds ${fnm1},8($xfer)
  334. addl $ab0,$nm0,$nm0 ; low part is discarded
  335. ldw 0($tp),$ti1 ; tp[1]
  336. extrd,u $nm0,31,32,$hi1
  337. fstds ${fab0},-16($xfer)
  338. fstds ${fnm0},-8($xfer)
  339. L\$inner
  340. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
  341. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  342. ldd 0($xfer),$ab1
  343. fstds ${fab1},0($xfer)
  344. addl $hi0,$ti1,$ti1
  345. addl $ti1,$ab1,$ab1
  346. ldd 8($xfer),$nm1
  347. fstds ${fnm1},8($xfer)
  348. extrd,u $ab1,31,32,$hi0
  349. extrd,u $ab1,63,32,$ab1
  350. flddx $idx($ap),${fai} ; ap[j,j+1]
  351. flddx $idx($np),${fni} ; np[j,j+1]
  352. addl $hi1,$nm1,$nm1
  353. addl $ab1,$nm1,$nm1
  354. ldw 4($tp),$ti0 ; tp[j]
  355. stw $nm1,-4($tp) ; tp[j-1]
  356. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  357. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  358. ldd -16($xfer),$ab0
  359. fstds ${fab0},-16($xfer)
  360. addl $hi0,$ti0,$ti0
  361. addl $ti0,$ab0,$ab0
  362. ldd -8($xfer),$nm0
  363. fstds ${fnm0},-8($xfer)
  364. extrd,u $ab0,31,32,$hi0
  365. extrd,u $nm1,31,32,$hi1
  366. ldw 8($tp),$ti1 ; tp[j]
  367. extrd,u $ab0,63,32,$ab0
  368. addl $hi1,$nm0,$nm0
  369. addl $ab0,$nm0,$nm0
  370. stw,ma $nm0,8($tp) ; tp[j-1]
  371. addib,<> 8,$idx,L\$inner ; j++++
  372. extrd,u $nm0,31,32,$hi1
  373. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
  374. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  375. ldd 0($xfer),$ab1
  376. fstds ${fab1},0($xfer)
  377. addl $hi0,$ti1,$ti1
  378. addl $ti1,$ab1,$ab1
  379. ldd 8($xfer),$nm1
  380. fstds ${fnm1},8($xfer)
  381. extrd,u $ab1,31,32,$hi0
  382. extrd,u $ab1,63,32,$ab1
  383. ldw 4($tp),$ti0 ; tp[j]
  384. addl $hi1,$nm1,$nm1
  385. addl $ab1,$nm1,$nm1
  386. ldd -16($xfer),$ab0
  387. ldd -8($xfer),$nm0
  388. extrd,u $nm1,31,32,$hi1
  389. addl $hi0,$ab0,$ab0
  390. addl $ti0,$ab0,$ab0
  391. stw $nm1,-4($tp) ; tp[j-1]
  392. extrd,u $ab0,31,32,$hi0
  393. ldw 8($tp),$ti1 ; tp[j]
  394. extrd,u $ab0,63,32,$ab0
  395. addl $hi1,$nm0,$nm0
  396. ldd 0($xfer),$ab1
  397. addl $ab0,$nm0,$nm0
  398. ldd,mb 8($xfer),$nm1
  399. extrd,u $nm0,31,32,$hi1
  400. stw,ma $nm0,8($tp) ; tp[j-1]
  401. addib,= -1,$num,L\$outerdone ; i--
  402. subi 0,$arrsz,$idx ; j=0
  403. ___
  404. $code.=<<___ if ($BN_SZ==4);
  405. fldws,ma 4($bp),${fbi} ; bp[i]
  406. ___
  407. $code.=<<___ if ($BN_SZ==8);
  408. ldi 12,$ti0 ; bp[i] in flipped word order
  409. addl,ev %r0,$num,$num
  410. ldi -4,$ti0
  411. addl $ti0,$bp,$bp
  412. fldws 0($bp),${fbi}
  413. ___
  414. $code.=<<___;
  415. flddx $idx($ap),${fai} ; ap[0]
  416. addl $hi0,$ab1,$ab1
  417. flddx $idx($np),${fni} ; np[0]
  418. fldws 8($xfer),${fti}R ; tp[0]
  419. addl $ti1,$ab1,$ab1
  420. extrd,u $ab1,31,32,$hi0
  421. extrd,u $ab1,63,32,$ab1
  422. ldo 8($idx),$idx ; j++++
  423. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
  424. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
  425. ldw 4($tp),$ti0 ; tp[j]
  426. addl $hi1,$nm1,$nm1
  427. fstws,mb ${fab0}L,-8($xfer) ; save high part
  428. addl $ab1,$nm1,$nm1
  429. extrd,u $nm1,31,32,$hi1
  430. fcpy,sgl %fr0,${fti}L ; zero high part
  431. fcpy,sgl %fr0,${fab0}L
  432. stw $nm1,-4($tp) ; tp[j-1]
  433. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  434. fcnvxf,dbl,dbl ${fab0},${fab0}
  435. addl $hi1,$hi0,$hi0
  436. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  437. addl $ti0,$hi0,$hi0
  438. extrd,u $hi0,31,32,$hi1
  439. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  440. stw $hi0,0($tp)
  441. stw $hi1,4($tp)
  442. xmpyu ${fn0},${fab0}R,${fm0}
  443. b L\$outer
  444. ldo `$LOCALS+32+4`($fp),$tp
  445. L\$outerdone
  446. addl $hi0,$ab1,$ab1
  447. addl $ti1,$ab1,$ab1
  448. extrd,u $ab1,31,32,$hi0
  449. extrd,u $ab1,63,32,$ab1
  450. ldw 4($tp),$ti0 ; tp[j]
  451. addl $hi1,$nm1,$nm1
  452. addl $ab1,$nm1,$nm1
  453. extrd,u $nm1,31,32,$hi1
  454. stw $nm1,-4($tp) ; tp[j-1]
  455. addl $hi1,$hi0,$hi0
  456. addl $ti0,$hi0,$hi0
  457. extrd,u $hi0,31,32,$hi1
  458. stw $hi0,0($tp)
  459. stw $hi1,4($tp)
  460. ldo `$LOCALS+32`($fp),$tp
  461. sub %r0,%r0,%r0 ; clear borrow
  462. ___
  463. $code.=<<___ if ($BN_SZ==4);
  464. ldws,ma 4($tp),$ti0
  465. extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
  466. b L\$sub_pa11
  467. addl $tp,$arrsz,$tp
  468. L\$sub
  469. ldwx $idx($np),$hi0
  470. subb $ti0,$hi0,$hi1
  471. ldwx $idx($tp),$ti0
  472. addib,<> 4,$idx,L\$sub
  473. stws,ma $hi1,4($rp)
  474. subb $ti0,%r0,$hi1
  475. ___
  476. $code.=<<___ if ($BN_SZ==8);
  477. ldd,ma 8($tp),$ti0
  478. L\$sub
  479. ldd $idx($np),$hi0
  480. shrpd $ti0,$ti0,32,$ti0 ; flip word order
  481. std $ti0,-8($tp) ; save flipped value
  482. sub,db $ti0,$hi0,$hi1
  483. ldd,ma 8($tp),$ti0
  484. addib,<> 8,$idx,L\$sub
  485. std,ma $hi1,8($rp)
  486. extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
  487. sub,db $ti0,%r0,$hi1
  488. ___
  489. $code.=<<___;
  490. ldo `$LOCALS+32`($fp),$tp
  491. sub $rp,$arrsz,$rp ; rewind rp
  492. subi 0,$arrsz,$idx
  493. L\$copy
  494. ldd 0($tp),$ti0
  495. ldd 0($rp),$hi0
  496. std,ma %r0,8($tp)
  497. comiclr,= 0,$hi1,%r0
  498. copy $ti0,$hi0
  499. addib,<> 8,$idx,L\$copy
  500. std,ma $hi0,8($rp)
  501. ___
  502. if ($BN_SZ==4) { # PA-RISC 1.1 code-path
  503. $ablo=$ab0;
  504. $abhi=$ab1;
  505. $nmlo0=$nm0;
  506. $nmhi0=$nm1;
  507. $nmlo1="%r9";
  508. $nmhi1="%r8";
  509. $code.=<<___;
  510. b L\$done
  511. nop
  512. .ALIGN 8
  513. L\$parisc11
  514. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  515. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  516. ldw -12($xfer),$ablo
  517. ldw -16($xfer),$hi0
  518. ldw -4($xfer),$nmlo0
  519. ldw -8($xfer),$nmhi0
  520. fstds ${fab0},-16($xfer)
  521. fstds ${fnm0},-8($xfer)
  522. ldo 8($idx),$idx ; j++++
  523. add $ablo,$nmlo0,$nmlo0 ; discarded
  524. addc %r0,$nmhi0,$hi1
  525. ldw 4($xfer),$ablo
  526. ldw 0($xfer),$abhi
  527. nop
  528. L\$1st_pa11
  529. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
  530. flddx $idx($ap),${fai} ; ap[j,j+1]
  531. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  532. flddx $idx($np),${fni} ; np[j,j+1]
  533. add $hi0,$ablo,$ablo
  534. ldw 12($xfer),$nmlo1
  535. addc %r0,$abhi,$hi0
  536. ldw 8($xfer),$nmhi1
  537. add $ablo,$nmlo1,$nmlo1
  538. fstds ${fab1},0($xfer)
  539. addc %r0,$nmhi1,$nmhi1
  540. fstds ${fnm1},8($xfer)
  541. add $hi1,$nmlo1,$nmlo1
  542. ldw -12($xfer),$ablo
  543. addc %r0,$nmhi1,$hi1
  544. ldw -16($xfer),$abhi
  545. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
  546. ldw -4($xfer),$nmlo0
  547. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  548. ldw -8($xfer),$nmhi0
  549. add $hi0,$ablo,$ablo
  550. stw $nmlo1,-4($tp) ; tp[j-1]
  551. addc %r0,$abhi,$hi0
  552. fstds ${fab0},-16($xfer)
  553. add $ablo,$nmlo0,$nmlo0
  554. fstds ${fnm0},-8($xfer)
  555. addc %r0,$nmhi0,$nmhi0
  556. ldw 0($xfer),$abhi
  557. add $hi1,$nmlo0,$nmlo0
  558. ldw 4($xfer),$ablo
  559. stws,ma $nmlo0,8($tp) ; tp[j-1]
  560. addib,<> 8,$idx,L\$1st_pa11 ; j++++
  561. addc %r0,$nmhi0,$hi1
  562. ldw 8($xfer),$nmhi1
  563. ldw 12($xfer),$nmlo1
  564. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
  565. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  566. add $hi0,$ablo,$ablo
  567. fstds ${fab1},0($xfer)
  568. addc %r0,$abhi,$hi0
  569. fstds ${fnm1},8($xfer)
  570. add $ablo,$nmlo1,$nmlo1
  571. ldw -16($xfer),$abhi
  572. addc %r0,$nmhi1,$nmhi1
  573. ldw -12($xfer),$ablo
  574. add $hi1,$nmlo1,$nmlo1
  575. ldw -8($xfer),$nmhi0
  576. addc %r0,$nmhi1,$hi1
  577. ldw -4($xfer),$nmlo0
  578. add $hi0,$ablo,$ablo
  579. stw $nmlo1,-4($tp) ; tp[j-1]
  580. addc %r0,$abhi,$hi0
  581. ldw 0($xfer),$abhi
  582. add $ablo,$nmlo0,$nmlo0
  583. ldw 4($xfer),$ablo
  584. addc %r0,$nmhi0,$nmhi0
  585. ldws,mb 8($xfer),$nmhi1
  586. add $hi1,$nmlo0,$nmlo0
  587. ldw 4($xfer),$nmlo1
  588. addc %r0,$nmhi0,$hi1
  589. stws,ma $nmlo0,8($tp) ; tp[j-1]
  590. ldo -1($num),$num ; i--
  591. subi 0,$arrsz,$idx ; j=0
  592. fldws,ma 4($bp),${fbi} ; bp[1]
  593. flddx $idx($ap),${fai} ; ap[0,1]
  594. flddx $idx($np),${fni} ; np[0,1]
  595. fldws 8($xfer),${fti}R ; tp[0]
  596. add $hi0,$ablo,$ablo
  597. addc %r0,$abhi,$hi0
  598. ldo 8($idx),$idx ; j++++
  599. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
  600. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
  601. add $hi1,$nmlo1,$nmlo1
  602. addc %r0,$nmhi1,$nmhi1
  603. add $ablo,$nmlo1,$nmlo1
  604. addc %r0,$nmhi1,$hi1
  605. fstws,mb ${fab0}L,-8($xfer) ; save high part
  606. stw $nmlo1,-4($tp) ; tp[j-1]
  607. fcpy,sgl %fr0,${fti}L ; zero high part
  608. fcpy,sgl %fr0,${fab0}L
  609. add $hi1,$hi0,$hi0
  610. addc %r0,%r0,$hi1
  611. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  612. fcnvxf,dbl,dbl ${fab0},${fab0}
  613. stw $hi0,0($tp)
  614. stw $hi1,4($tp)
  615. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  616. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  617. xmpyu ${fn0},${fab0}R,${fm0}
  618. ldo `$LOCALS+32+4`($fp),$tp
  619. L\$outer_pa11
  620. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
  621. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
  622. fstds ${fab0},-16($xfer) ; 33-bit value
  623. fstds ${fnm0},-8($xfer)
  624. flddx $idx($ap),${fai} ; ap[2,3]
  625. flddx $idx($np),${fni} ; np[2,3]
  626. ldw -16($xfer),$abhi ; carry bit actually
  627. ldo 8($idx),$idx ; j++++
  628. ldw -12($xfer),$ablo
  629. ldw -8($xfer),$nmhi0
  630. ldw -4($xfer),$nmlo0
  631. ldw 0($xfer),$hi0 ; high part
  632. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  633. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  634. fstds ${fab1},0($xfer)
  635. addl $abhi,$hi0,$hi0 ; account carry bit
  636. fstds ${fnm1},8($xfer)
  637. add $ablo,$nmlo0,$nmlo0 ; discarded
  638. ldw 0($tp),$ti1 ; tp[1]
  639. addc %r0,$nmhi0,$hi1
  640. fstds ${fab0},-16($xfer)
  641. fstds ${fnm0},-8($xfer)
  642. ldw 4($xfer),$ablo
  643. ldw 0($xfer),$abhi
  644. L\$inner_pa11
  645. xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
  646. flddx $idx($ap),${fai} ; ap[j,j+1]
  647. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
  648. flddx $idx($np),${fni} ; np[j,j+1]
  649. add $hi0,$ablo,$ablo
  650. ldw 4($tp),$ti0 ; tp[j]
  651. addc %r0,$abhi,$abhi
  652. ldw 12($xfer),$nmlo1
  653. add $ti1,$ablo,$ablo
  654. ldw 8($xfer),$nmhi1
  655. addc %r0,$abhi,$hi0
  656. fstds ${fab1},0($xfer)
  657. add $ablo,$nmlo1,$nmlo1
  658. fstds ${fnm1},8($xfer)
  659. addc %r0,$nmhi1,$nmhi1
  660. ldw -12($xfer),$ablo
  661. add $hi1,$nmlo1,$nmlo1
  662. ldw -16($xfer),$abhi
  663. addc %r0,$nmhi1,$hi1
  664. xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
  665. ldw 8($tp),$ti1 ; tp[j]
  666. xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
  667. ldw -4($xfer),$nmlo0
  668. add $hi0,$ablo,$ablo
  669. ldw -8($xfer),$nmhi0
  670. addc %r0,$abhi,$abhi
  671. stw $nmlo1,-4($tp) ; tp[j-1]
  672. add $ti0,$ablo,$ablo
  673. fstds ${fab0},-16($xfer)
  674. addc %r0,$abhi,$hi0
  675. fstds ${fnm0},-8($xfer)
  676. add $ablo,$nmlo0,$nmlo0
  677. ldw 4($xfer),$ablo
  678. addc %r0,$nmhi0,$nmhi0
  679. ldw 0($xfer),$abhi
  680. add $hi1,$nmlo0,$nmlo0
  681. stws,ma $nmlo0,8($tp) ; tp[j-1]
  682. addib,<> 8,$idx,L\$inner_pa11 ; j++++
  683. addc %r0,$nmhi0,$hi1
  684. xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
  685. ldw 12($xfer),$nmlo1
  686. xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
  687. ldw 8($xfer),$nmhi1
  688. add $hi0,$ablo,$ablo
  689. ldw 4($tp),$ti0 ; tp[j]
  690. addc %r0,$abhi,$abhi
  691. fstds ${fab1},0($xfer)
  692. add $ti1,$ablo,$ablo
  693. fstds ${fnm1},8($xfer)
  694. addc %r0,$abhi,$hi0
  695. ldw -16($xfer),$abhi
  696. add $ablo,$nmlo1,$nmlo1
  697. ldw -12($xfer),$ablo
  698. addc %r0,$nmhi1,$nmhi1
  699. ldw -8($xfer),$nmhi0
  700. add $hi1,$nmlo1,$nmlo1
  701. ldw -4($xfer),$nmlo0
  702. addc %r0,$nmhi1,$hi1
  703. add $hi0,$ablo,$ablo
  704. stw $nmlo1,-4($tp) ; tp[j-1]
  705. addc %r0,$abhi,$abhi
  706. add $ti0,$ablo,$ablo
  707. ldw 8($tp),$ti1 ; tp[j]
  708. addc %r0,$abhi,$hi0
  709. ldw 0($xfer),$abhi
  710. add $ablo,$nmlo0,$nmlo0
  711. ldw 4($xfer),$ablo
  712. addc %r0,$nmhi0,$nmhi0
  713. ldws,mb 8($xfer),$nmhi1
  714. add $hi1,$nmlo0,$nmlo0
  715. ldw 4($xfer),$nmlo1
  716. addc %r0,$nmhi0,$hi1
  717. stws,ma $nmlo0,8($tp) ; tp[j-1]
  718. addib,= -1,$num,L\$outerdone_pa11; i--
  719. subi 0,$arrsz,$idx ; j=0
  720. fldws,ma 4($bp),${fbi} ; bp[i]
  721. flddx $idx($ap),${fai} ; ap[0]
  722. add $hi0,$ablo,$ablo
  723. addc %r0,$abhi,$abhi
  724. flddx $idx($np),${fni} ; np[0]
  725. fldws 8($xfer),${fti}R ; tp[0]
  726. add $ti1,$ablo,$ablo
  727. addc %r0,$abhi,$hi0
  728. ldo 8($idx),$idx ; j++++
  729. xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
  730. xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
  731. ldw 4($tp),$ti0 ; tp[j]
  732. add $hi1,$nmlo1,$nmlo1
  733. addc %r0,$nmhi1,$nmhi1
  734. fstws,mb ${fab0}L,-8($xfer) ; save high part
  735. add $ablo,$nmlo1,$nmlo1
  736. addc %r0,$nmhi1,$hi1
  737. fcpy,sgl %fr0,${fti}L ; zero high part
  738. fcpy,sgl %fr0,${fab0}L
  739. stw $nmlo1,-4($tp) ; tp[j-1]
  740. fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
  741. fcnvxf,dbl,dbl ${fab0},${fab0}
  742. add $hi1,$hi0,$hi0
  743. addc %r0,%r0,$hi1
  744. fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
  745. add $ti0,$hi0,$hi0
  746. addc %r0,$hi1,$hi1
  747. fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
  748. stw $hi0,0($tp)
  749. stw $hi1,4($tp)
  750. xmpyu ${fn0},${fab0}R,${fm0}
  751. b L\$outer_pa11
  752. ldo `$LOCALS+32+4`($fp),$tp
  753. L\$outerdone_pa11
  754. add $hi0,$ablo,$ablo
  755. addc %r0,$abhi,$abhi
  756. add $ti1,$ablo,$ablo
  757. addc %r0,$abhi,$hi0
  758. ldw 4($tp),$ti0 ; tp[j]
  759. add $hi1,$nmlo1,$nmlo1
  760. addc %r0,$nmhi1,$nmhi1
  761. add $ablo,$nmlo1,$nmlo1
  762. addc %r0,$nmhi1,$hi1
  763. stw $nmlo1,-4($tp) ; tp[j-1]
  764. add $hi1,$hi0,$hi0
  765. addc %r0,%r0,$hi1
  766. add $ti0,$hi0,$hi0
  767. addc %r0,$hi1,$hi1
  768. stw $hi0,0($tp)
  769. stw $hi1,4($tp)
  770. ldo `$LOCALS+32+4`($fp),$tp
  771. sub %r0,%r0,%r0 ; clear borrow
  772. ldw -4($tp),$ti0
  773. addl $tp,$arrsz,$tp
  774. L\$sub_pa11
  775. ldwx $idx($np),$hi0
  776. subb $ti0,$hi0,$hi1
  777. ldwx $idx($tp),$ti0
  778. addib,<> 4,$idx,L\$sub_pa11
  779. stws,ma $hi1,4($rp)
  780. subb $ti0,%r0,$hi1
  781. ldo `$LOCALS+32`($fp),$tp
  782. sub $rp,$arrsz,$rp ; rewind rp
  783. subi 0,$arrsz,$idx
  784. L\$copy_pa11
  785. ldw 0($tp),$ti0
  786. ldw 0($rp),$hi0
  787. stws,ma %r0,4($tp)
  788. comiclr,= 0,$hi1,%r0
  789. copy $ti0,$hi0
  790. addib,<> 4,$idx,L\$copy_pa11
  791. stws,ma $hi0,4($rp)
  792. nop ; alignment
  793. L\$done
  794. ___
  795. }
  796. $code.=<<___;
  797. ldi 1,%r28 ; signal "handled"
  798. ldo $FRAME($fp),%sp ; destroy tp[num+1]
  799. $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
  800. $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
  801. $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
  802. $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
  803. $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
  804. $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
  805. $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
  806. $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
  807. L\$abort
  808. bv (%r2)
  809. .EXIT
  810. $POPMB -$FRAME(%sp),%r3
  811. .PROCEND
  812. .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
  813. ___
  814. # Explicitly encode PA-RISC 2.0 instructions used in this module, so
  815. # that it can be compiled with .LEVEL 1.0. It should be noted that I
  816. # wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
  817. # directive...
  818. my $ldd = sub {
  819. my ($mod,$args) = @_;
  820. my $orig = "ldd$mod\t$args";
  821. if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
  822. { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
  823. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  824. }
  825. elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
  826. { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
  827. $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
  828. $opcode|=(1<<5) if ($mod =~ /^,m/);
  829. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  830. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  831. }
  832. else { "\t".$orig; }
  833. };
  834. my $std = sub {
  835. my ($mod,$args) = @_;
  836. my $orig = "std$mod\t$args";
  837. if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
  838. { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
  839. $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
  840. $opcode|=(1<<5) if ($mod =~ /^,m/);
  841. $opcode|=(1<<13) if ($mod =~ /^,mb/);
  842. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  843. }
  844. else { "\t".$orig; }
  845. };
  846. my $extrd = sub {
  847. my ($mod,$args) = @_;
  848. my $orig = "extrd$mod\t$args";
  849. # I only have ",u" completer, it's implicitly encoded...
  850. if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
  851. { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
  852. my $len=32-$3;
  853. $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
  854. $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
  855. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  856. }
  857. elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
  858. { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
  859. my $len=32-$2;
  860. $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
  861. $opcode |= (1<<13) if ($mod =~ /,\**=/);
  862. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  863. }
  864. else { "\t".$orig; }
  865. };
  866. my $shrpd = sub {
  867. my ($mod,$args) = @_;
  868. my $orig = "shrpd$mod\t$args";
  869. if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
  870. { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
  871. my $cpos=63-$3;
  872. $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
  873. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
  874. }
  875. else { "\t".$orig; }
  876. };
  877. my $sub = sub {
  878. my ($mod,$args) = @_;
  879. my $orig = "sub$mod\t$args";
  880. if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
  881. my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
  882. $opcode|=(1<<10); # e1
  883. $opcode|=(1<<8); # e2
  884. $opcode|=(1<<5); # d
  885. sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
  886. }
  887. else { "\t".$orig; }
  888. };
  889. sub assemble {
  890. my ($mnemonic,$mod,$args)=@_;
  891. my $opcode = eval("\$$mnemonic");
  892. ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
  893. }
  894. if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
  895. =~ /GNU assembler/) {
  896. $gnuas = 1;
  897. }
  898. foreach (split("\n",$code)) {
  899. s/\`([^\`]*)\`/eval $1/ge;
  900. # flip word order in 64-bit mode...
  901. s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
  902. # assemble 2.0 instructions in 32-bit mode...
  903. s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
  904. s/(\.LEVEL\s+2\.0)W/$1w/ if ($gnuas && $SIZE_T==8);
  905. s/\.SPACE\s+\$TEXT\$/.text/ if ($gnuas && $SIZE_T==8);
  906. s/\.SUBSPA.*// if ($gnuas && $SIZE_T==8);
  907. s/\bbv\b/bve/ if ($SIZE_T==8);
  908. print $_,"\n";
  909. }
  910. close STDOUT or die "error closing STDOUT: $!";