ia64-mont.pl

#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# January 2010
#
# "Teaser" Montgomery multiplication module for IA-64. There are
# several possibilities for improvement:
#
# - modulo-scheduling the outer loop would eliminate quite a number of
#   stalls after ldf8, xma and getf.sig outside the inner loop and
#   improve shorter-key performance;
# - shorter vector support [with input vectors being fetched only
#   once] should be added;
# - a 2x unroll with the help of n0[1] would make the code scalable on
#   "wider" IA-64, "wider" than Itanium 2 that is, which is not of
#   acute interest, because upcoming Tukwila's individual cores are
#   reportedly based on the Itanium 2 design;
# - a dedicated squaring procedure(?);
#
# January 2010
#
# Shorter vector support is implemented by zero-padding the ap and np
# vectors up to 8 elements, or 512 bits. This means that 256-bit
# inputs will be processed only 2 times faster than 512-bit inputs,
# not 4 [as one would expect, because algorithm complexity is n^2].
# The reason for padding is that inputs shorter than 512 bits won't
# be processed faster anyway, because the minimal critical path of the
# core loop happens to match 512-bit timing. Either way, it resulted
# in a >100% improvement of the 512-bit RSA sign benchmark and 50% of
# the 1024-bit one [in comparison to the original version of *this*
# module].
#
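# To see where the 2x [rather than 4x] figure comes from: in the padded
# bn_mul_mont_8 path below, the unrolled inner pass always walks all 8
# limbs, and only the outer loop count scales with the real operand
# length, so a 256-bit operand costs roughly 4*8 limb operations
# against 8*8 for a 512-bit one, i.e. half, not a quarter.
#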
# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
# this module is:
#                    sign     verify     sign/s  verify/s
# rsa  512 bits  0.000290s  0.000024s    3452.8   42031.4
# rsa 1024 bits  0.000793s  0.000058s    1261.7   17172.0
# rsa 2048 bits  0.005908s  0.000148s     169.3    6754.0
# rsa 4096 bits  0.033456s  0.000469s      29.9    2133.6
# dsa  512 bits  0.000253s  0.000198s    3949.9    5057.0
# dsa 1024 bits  0.000585s  0.000607s    1708.4    1647.4
# dsa 2048 bits  0.001453s  0.001703s     688.1     587.4
#
# ... and *without* (but still with ia64.S):
#
# rsa  512 bits  0.000670s  0.000041s    1491.8   24145.5
# rsa 1024 bits  0.001988s  0.000080s     502.9   12499.3
# rsa 2048 bits  0.008702s  0.000189s     114.9    5293.9
# rsa 4096 bits  0.043860s  0.000533s      22.8    1875.9
# dsa  512 bits  0.000441s  0.000427s    2265.3    2340.6
# dsa 1024 bits  0.000823s  0.000867s    1215.6    1153.2
# dsa 2048 bits  0.001894s  0.002179s     528.1     458.9
#
# As can be seen, RSA sign performance improves by 130% for the
# shortest key, tapering off to 30% for the longest one [e.g.
# 3452.8/1491.8 = 2.31x for 512-bit sign], while verify improves by
# 74% down to 13%. DSA performance improves by 115% down to 30%.
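
# A note on the pointer arithmetic below: HP-UX defaults to the ILP32
# data model, in which only the low 32 bits of an incoming pointer
# register are guaranteed, hence addp4; +DD64 [HP cc] and -mlp64 [gcc]
# select LP64, where a plain add suffices.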
if ($^O eq "hpux") {
    $ADDP="addp4";
    for (@ARGV) { $ADDP="add" if (/(\+DD|\-mlp)64/); }
} else { $ADDP="add"; }
$code=<<___;
.explicit
.text
// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
//                  const BN_ULONG *bp,const BN_ULONG *np,
//                  const BN_ULONG *n0p,int num);
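//
// For reference, a plain C sketch of the operation these routines
// implement: rp[] = ap[]*bp[]*2^(-64*num) mod np[], where n0p points
// at n0 = -np[0]^(-1) mod 2^64. The sketch is an illustration only,
// mont_mul_ref and its reliance on unsigned __int128 are not part of
// this module [it assumes <stdint.h>, <string.h> and a compiler with
// unsigned __int128]; it merely spells out the word-by-word algorithm
// that the assembly below schedules:
//
//	static void mont_mul_ref(uint64_t *rp, const uint64_t *ap,
//	                         const uint64_t *bp, const uint64_t *np,
//	                         uint64_t n0, int num)
//	{
//	    uint64_t tp[num+1];             /* scratch, t in the code */
//	    memset(tp, 0, sizeof(tp));
//	    for (int i=0; i<num; i++) {
//	        unsigned __int128 t;
//	        uint64_t c=0, m, top;
//	        /* tp += ap*bp[i] */
//	        for (int j=0; j<num; j++) {
//	            t = (unsigned __int128)ap[j]*bp[i] + tp[j] + c;
//	            tp[j] = (uint64_t)t;    c = (uint64_t)(t>>64);
//	        }
//	        t = (unsigned __int128)tp[num] + c;
//	        tp[num] = (uint64_t)t;      top = (uint64_t)(t>>64);
//	        /* pick m so that tp becomes divisible by 2^64 */
//	        m = tp[0]*n0;               c = 0;
//	        /* tp += np*m */
//	        for (int j=0; j<num; j++) {
//	            t = (unsigned __int128)np[j]*m + tp[j] + c;
//	            tp[j] = (uint64_t)t;    c = (uint64_t)(t>>64);
//	        }
//	        t = (unsigned __int128)tp[num] + c;
//	        tp[num] = (uint64_t)t;      top += (uint64_t)(t>>64);
//	        /* divide by 2^64, i.e. drop the now-zero tp[0] */
//	        for (int j=0; j<num; j++)   tp[j] = tp[j+1];
//	        tp[num] = top;
//	    }
//	    /* conditionally subtract np once */
//	    uint64_t borrow = 0, sub[num];
//	    for (int j=0; j<num; j++) {
//	        unsigned __int128 t = (unsigned __int128)tp[j] - np[j] - borrow;
//	        sub[j] = (uint64_t)t;
//	        borrow = (uint64_t)(t>>64) & 1;
//	    }
//	    int ge = tp[num] | (borrow^1);  /* tp >= np? */
//	    for (int j=0; j<num; j++)       rp[j] = ge ? sub[j] : tp[j];
//	}
//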
.align 64
.global bn_mul_mont#
.proc bn_mul_mont#
bn_mul_mont:
.prologue
.body
{ .mmi; cmp4.le p6,p7=2,r37;;
(p6) cmp4.lt.unc p8,p9=8,r37
mov ret0=r0 };;
{ .bbb;
(p9) br.cond.dptk.many bn_mul_mont_8
(p8) br.cond.dpnt.many bn_mul_mont_general
(p7) br.ret.spnt.many b0 };;
.endp bn_mul_mont#
prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11;
rptr=r8; aptr=r9; bptr=r14; nptr=r15;
tptr=r16; // &tp[0]
tp_1=r17; // &tp[-1]
num=r18; len=r19; lc=r20;
topbit=r21; // carry bit from tmp[num]
n0=f6;
m0=f7;
bi=f8;
.align 64
.local bn_mul_mont_general#
.proc bn_mul_mont_general#
bn_mul_mont_general:
.prologue
{ .mmi; .save ar.pfs,prevfs
alloc prevfs=ar.pfs,6,2,0,8
$ADDP aptr=0,in1
.save ar.lc,prevlc
mov prevlc=ar.lc }
{ .mmi; .vframe prevsp
mov prevsp=sp
$ADDP bptr=0,in2
.save pr,prevpr
mov prevpr=pr };;
.body
.rotf alo[6],nlo[4],ahi[8],nhi[6]
.rotr a[3],n[3],t[2]
{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
ldf8 alo[4]=[aptr],16 // ap[0]
$ADDP r30=8,in1 };;
{ .mmi; ldf8 alo[3]=[r30],16 // ap[1]
ldf8 alo[2]=[aptr],16 // ap[2]
$ADDP in4=0,in4 };;
{ .mmi; ldf8 alo[1]=[r30] // ap[3]
ldf8 n0=[in4] // n0
$ADDP rptr=0,in0 }
{ .mmi; $ADDP nptr=0,in3
mov r31=16
zxt4 num=in5 };;
{ .mmi; ldf8 nlo[2]=[nptr],8 // np[0]
shladd len=num,3,r0
shladd r31=num,3,r31 };;
{ .mmi; ldf8 nlo[1]=[nptr],8 // np[1]
add lc=-5,num
sub r31=sp,r31 };;
{ .mfb; and sp=-16,r31 // alloca
xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0]
nop.b 0 }
{ .mfb; nop.m 0
xmpy.lu alo[4]=alo[4],bi
brp.loop.imp .L1st_ctop,.L1st_cend-16
};;
{ .mfi; nop.m 0
xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0]
add tp_1=8,sp }
{ .mfi; nop.m 0
xma.lu alo[3]=alo[3],bi,ahi[2]
mov pr.rot=0x20001f<<16
// ------^----- (p40) at first (p23)
// ----------^^ p[16:20]=1
};;
{ .mfi; nop.m 0
xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
mov ar.lc=lc }
{ .mfi; nop.m 0
fcvt.fxu.s1 nhi[1]=f0
mov ar.ec=8 };;
.align 32
.L1st_ctop:
.pred.rel "mutex",p40,p42
{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
(p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
(p40) add n[2]=n[2],a[2] } // (p23) }
{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16)
(p18) xma.lu alo[2]=alo[2],bi,ahi[1]
(p42) add n[2]=n[2],a[2],1 };; // (p23)
{ .mfi; (p21) getf.sig a[0]=alo[5]
(p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
(p42) cmp.leu p41,p39=n[2],a[2] } // (p23)
{ .mfi; (p23) st8 [tp_1]=n[2],8
(p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
(p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
{ .mmb; (p21) getf.sig n[0]=nlo[3]
(p16) nop.m 0
br.ctop.sptk .L1st_ctop };;
.L1st_cend:
{ .mmi; getf.sig a[0]=ahi[6] // (p24)
getf.sig n[0]=nhi[4]
add num=-1,num };; // num--
{ .mmi; .pred.rel "mutex",p40,p42
(p40) add n[0]=n[0],a[0]
(p42) add n[0]=n[0],a[0],1
sub aptr=aptr,len };; // rewind
{ .mmi; .pred.rel "mutex",p40,p42
(p40) cmp.ltu p41,p39=n[0],a[0]
(p42) cmp.leu p41,p39=n[0],a[0]
sub nptr=nptr,len };;
{ .mmi; .pred.rel "mutex",p39,p41
(p39) add topbit=r0,r0
(p41) add topbit=r0,r0,1
nop.i 0 }
{ .mmi; st8 [tp_1]=n[0]
add tptr=16,sp
add tp_1=8,sp };;
.Louter:
{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
ldf8 ahi[3]=[tptr] // tp[0]
add r30=8,aptr };;
{ .mmi; ldf8 alo[4]=[aptr],16 // ap[0]
ldf8 alo[3]=[r30],16 // ap[1]
add r31=8,nptr };;
{ .mfb; ldf8 alo[2]=[aptr],16 // ap[2]
xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0]
brp.loop.imp .Linner_ctop,.Linner_cend-16
}
{ .mfb; ldf8 alo[1]=[r30] // ap[3]
xma.lu alo[4]=alo[4],bi,ahi[3]
clrrrb.pr };;
{ .mfi; ldf8 nlo[2]=[nptr],16 // np[0]
xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i]
nop.i 0 }
{ .mfi; ldf8 nlo[1]=[r31] // np[1]
xma.lu alo[3]=alo[3],bi,ahi[2]
mov pr.rot=0x20101f<<16
// ------^----- (p40) at first (p23)
// --------^--- (p30) at first (p22)
// ----------^^ p[16:20]=1
};;
{ .mfi; st8 [tptr]=r0 // tp[0] is already accounted
xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
mov ar.lc=lc }
{ .mfi;
fcvt.fxu.s1 nhi[1]=f0
mov ar.ec=8 };;
// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
// 7*(n+7) ticks on Itanium (the one codenamed Merced). The factor of 7
// in the latter case accounts for a two-tick pipeline stall, which
// means that its performance would be ~20% lower than the optimal one.
// No attempt was made to address this, because the original Itanium is
// hardly represented in the wild...
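// To put the formula in numbers: with a 1024-bit modulus, i.e. n=16
// 64-bit limbs, one pass through this inner loop costs about
// 4*(16+7)=92 ticks on Itanium 2, and the loop is entered roughly
// once per word of bp.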
.align 32
.Linner_ctop:
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p30,p32
{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
(p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
(p40) add n[2]=n[2],a[2] } // (p23)
{ .mfi; (p16) nop.m 0
(p18) xma.lu alo[2]=alo[2],bi,ahi[1]
(p42) add n[2]=n[2],a[2],1 };; // (p23)
{ .mfi; (p21) getf.sig a[0]=alo[5]
(p16) nop.f 0
(p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
{ .mfi; (p21) ld8 t[0]=[tptr],8
(p16) nop.f 0
(p42) cmp.leu p41,p39=n[2],a[2] };; // (p23)
{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)
(p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
(p30) add a[1]=a[1],t[1] } // (p22)
{ .mfi; (p16) nop.m 0
(p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
(p32) add a[1]=a[1],t[1],1 };; // (p22)
{ .mmi; (p21) getf.sig n[0]=nlo[3]
(p16) nop.m 0
(p30) cmp.ltu p31,p29=a[1],t[1] } // (p22)
{ .mmb; (p23) st8 [tp_1]=n[2],8
(p32) cmp.leu p31,p29=a[1],t[1] // (p22)
br.ctop.sptk .Linner_ctop };;
.Linner_cend:
{ .mmi; getf.sig a[0]=ahi[6] // (p24)
getf.sig n[0]=nhi[4]
nop.i 0 };;
{ .mmi; .pred.rel "mutex",p31,p33
(p31) add a[0]=a[0],topbit
(p33) add a[0]=a[0],topbit,1
mov topbit=r0 };;
{ .mfi; .pred.rel "mutex",p31,p33
(p31) cmp.ltu p32,p30=a[0],topbit
(p33) cmp.leu p32,p30=a[0],topbit
}
{ .mfi; .pred.rel "mutex",p40,p42
(p40) add n[0]=n[0],a[0]
(p42) add n[0]=n[0],a[0],1
};;
{ .mmi; .pred.rel "mutex",p44,p46
(p40) cmp.ltu p41,p39=n[0],a[0]
(p42) cmp.leu p41,p39=n[0],a[0]
(p32) add topbit=r0,r0,1 }
{ .mmi; st8 [tp_1]=n[0],8
cmp4.ne p6,p0=1,num
sub aptr=aptr,len };; // rewind
{ .mmi; sub nptr=nptr,len
(p41) add topbit=r0,r0,1
add tptr=16,sp }
{ .mmb; add tp_1=8,sp
add num=-1,num // num--
(p6) br.cond.sptk.many .Louter };;
{ .mbb; add lc=4,lc
brp.loop.imp .Lsub_ctop,.Lsub_cend-16
clrrrb.pr };;
{ .mii; nop.m 0
mov pr.rot=0x10001<<16
// ------^---- (p33) at first (p17)
mov ar.lc=lc }
{ .mii; nop.m 0
mov ar.ec=3
nop.i 0 };;
.Lsub_ctop:
.pred.rel "mutex",p33,p35
{ .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++)
(p16) nop.f 0
(p33) sub n[1]=t[1],n[1] } // (p17)
{ .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++)
(p16) nop.f 0
(p35) sub n[1]=t[1],n[1],1 };; // (p17)
{ .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r
(p33) cmp.gtu p34,p32=n[1],t[1] // (p17)
(p18) nop.b 0 }
{ .mib; (p18) nop.m 0
(p35) cmp.geu p34,p32=n[1],t[1] // (p17)
br.ctop.sptk .Lsub_ctop };;
.Lsub_cend:
{ .mmb; .pred.rel "mutex",p34,p36
(p34) sub topbit=topbit,r0 // (p19)
(p36) sub topbit=topbit,r0,1
brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16
}
{ .mmb; sub rptr=rptr,len // rewind
sub tptr=tptr,len
clrrrb.pr };;
{ .mmi; and aptr=tptr,topbit
andcm bptr=rptr,topbit
mov pr.rot=1<<16 };;
{ .mii; or nptr=aptr,bptr
mov ar.lc=lc
mov ar.ec=3 };;
.Lcopy_ctop:
{ .mmb; (p16) ld8 n[0]=[nptr],8
(p18) st8 [tptr]=r0,8
(p16) nop.b 0 }
{ .mmb; (p16) nop.m 0
(p18) st8 [rptr]=n[2],8
br.ctop.sptk .Lcopy_ctop };;
.Lcopy_cend:
{ .mmi; mov ret0=1 // signal "handled"
rum 1<<5 // clear um.mfh
mov ar.lc=prevlc }
{ .mib; .restore sp
mov sp=prevsp
mov pr=prevpr,0x1ffff
br.ret.sptk.many b0 };;
.endp bn_mul_mont_general#
a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23;
n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31;
t0=r15;
ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
.align 64
.skip 48 // aligns loop body
.local bn_mul_mont_8#
.proc bn_mul_mont_8#
bn_mul_mont_8:
.prologue
{ .mmi; .save ar.pfs,prevfs
alloc prevfs=ar.pfs,6,2,0,8
.vframe prevsp
mov prevsp=sp
.save ar.lc,prevlc
mov prevlc=ar.lc }
{ .mmi; add r17=-6*16,sp
add sp=-7*16,sp
.save pr,prevpr
mov prevpr=pr };;
{ .mmi; .save.gf 0,0x10
stf.spill [sp]=f16,-16
.save.gf 0,0x20
stf.spill [r17]=f17,32
add r16=-5*16,prevsp};;
{ .mmi; .save.gf 0,0x40
stf.spill [r16]=f18,32
.save.gf 0,0x80
stf.spill [r17]=f19,32
$ADDP aptr=0,in1 };;
{ .mmi; .save.gf 0,0x100
stf.spill [r16]=f20,32
.save.gf 0,0x200
stf.spill [r17]=f21,32
$ADDP r29=8,in1 };;
{ .mmi; .save.gf 0,0x400
stf.spill [r16]=f22
.save.gf 0,0x800
stf.spill [r17]=f23
$ADDP rptr=0,in0 };;
.body
.rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
.rotr t[8]
// load input vectors padding them to 8 elements
{ .mmi; ldf8 ai0=[aptr],16 // ap[0]
ldf8 ai1=[r29],16 // ap[1]
$ADDP bptr=0,in2 }
{ .mmi; $ADDP r30=8,in2
$ADDP nptr=0,in3
$ADDP r31=8,in3 };;
{ .mmi; ldf8 bj[7]=[bptr],16 // bp[0]
ldf8 bj[6]=[r30],16 // bp[1]
cmp4.le p4,p5=3,in5 }
{ .mmi; ldf8 ni0=[nptr],16 // np[0]
ldf8 ni1=[r31],16 // np[1]
cmp4.le p6,p7=4,in5 };;
{ .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2]
(p5)fcvt.fxu ai2=f0
cmp4.le p8,p9=5,in5 }
{ .mfi; (p6)ldf8 ai3=[r29],16 // ap[3]
(p7)fcvt.fxu ai3=f0
cmp4.le p10,p11=6,in5 }
{ .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2]
(p5)fcvt.fxu bj[5]=f0
cmp4.le p12,p13=7,in5 }
{ .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3]
(p7)fcvt.fxu bj[4]=f0
cmp4.le p14,p15=8,in5 }
{ .mfi; (p4)ldf8 ni2=[nptr],16 // np[2]
(p5)fcvt.fxu ni2=f0
addp4 r28=-1,in5 }
{ .mfi; (p6)ldf8 ni3=[r31],16 // np[3]
(p7)fcvt.fxu ni3=f0
$ADDP in4=0,in4 };;
{ .mfi; ldf8 n0=[in4]
fcvt.fxu tf[1]=f0
nop.i 0 }
{ .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4]
(p9)fcvt.fxu ai4=f0
mov t[0]=r0 }
{ .mfi; (p10)ldf8 ai5=[r29],16 // ap[5]
(p11)fcvt.fxu ai5=f0
mov t[1]=r0 }
{ .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4]
(p9)fcvt.fxu bj[3]=f0
mov t[2]=r0 }
{ .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5]
(p11)fcvt.fxu bj[2]=f0
mov t[3]=r0 }
{ .mfi; (p8)ldf8 ni4=[nptr],16 // np[4]
(p9)fcvt.fxu ni4=f0
mov t[4]=r0 }
{ .mfi; (p10)ldf8 ni5=[r31],16 // np[5]
(p11)fcvt.fxu ni5=f0
mov t[5]=r0 };;
{ .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6]
(p13)fcvt.fxu ai6=f0
mov t[6]=r0 }
{ .mfi; (p14)ldf8 ai7=[r29],16 // ap[7]
(p15)fcvt.fxu ai7=f0
mov t[7]=r0 }
{ .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6]
(p13)fcvt.fxu bj[1]=f0
mov ar.lc=r28 }
{ .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7]
(p15)fcvt.fxu bj[0]=f0
mov ar.ec=1 }
{ .mfi; (p12)ldf8 ni6=[nptr],16 // np[6]
(p13)fcvt.fxu ni6=f0
mov pr.rot=1<<16 }
{ .mfb; (p14)ldf8 ni7=[r31],16 // np[7]
(p15)fcvt.fxu ni7=f0
brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16
};;
// The loop is scheduled for 32*n ticks on Itanium 2. An actual attempt
// to measure it with the help of the Interval Time Counter indicated
// that the factor is a tad higher: 33 or 34, if not 35. Exact
// measurement and addressing the issue are problematic, because I
// don't have access to a platform-specific instruction-level profiler.
// On Itanium it should run in 56*n ticks, because of higher xma
// latency...
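// In concrete terms that puts a full-size 512-bit operand, n=8, at
// about 32*8=256 ticks in this loop by the nominal schedule, or a
// little more per the measurement reservation above.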
.Louter_8_ctop:
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mfi; (p16) nop.m 0 // 0:
(p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0]
(p40) add a3=a3,n3 } // (p17) a3+=n3
{ .mfi; (p42) add a3=a3,n3,1
(p16) xma.lu alo[0]=ai0,bj[7],tf[1]
(p16) nop.i 0 };;
{ .mii; (p17) getf.sig a7=alo[8] // 1:
(p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
(p50) add t[6]=t[6],a3,1 };;
{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
(p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
(p40) cmp.ltu p43,p41=a3,n3 }
{ .mfi; (p42) cmp.leu p43,p41=a3,n3
(p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
(p16) nop.i 0 };;
{ .mii; (p17) getf.sig n5=nlo[6] // 3:
(p48) cmp.ltu p51,p49=t[6],a3
(p50) cmp.leu p51,p49=t[6],a3 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mfi; (p16) nop.m 0 // 4:
(p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i]
(p41) add a4=a4,n4 } // (p17) a4+=n4
{ .mfi; (p43) add a4=a4,n4,1
(p16) xma.lu alo[1]=ai1,bj[7],ahi[0]
(p16) nop.i 0 };;
{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
(p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0
(p51) add t[5]=t[5],a4,1 };;
{ .mfi; (p16) nop.m 0 // 6:
(p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
(p41) cmp.ltu p42,p40=a4,n4 }
{ .mfi; (p43) cmp.leu p42,p40=a4,n4
(p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
(p16) nop.i 0 };;
{ .mii; (p17) getf.sig n6=nlo[7] // 7:
(p49) cmp.ltu p50,p48=t[5],a4
(p51) cmp.leu p50,p48=t[5],a4 };;
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mfi; (p16) nop.m 0 // 8:
(p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i]
(p40) add a5=a5,n5 } // (p17) a5+=n5
{ .mfi; (p42) add a5=a5,n5,1
(p16) xma.lu alo[2]=ai2,bj[7],ahi[1]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig a1=alo[1] // 9:
(p48) add t[4]=t[4],a5 // (p17) t[4]+=a5
(p50) add t[4]=t[4],a5,1 };;
{ .mfi; (p16) nop.m 0 // 10:
(p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0
(p40) cmp.ltu p43,p41=a5,n5 }
{ .mfi; (p42) cmp.leu p43,p41=a5,n5
(p16) xma.lu nlo[0]=ni0,mj[0],alo[0]
(p16) nop.i 0 };;
{ .mii; (p17) getf.sig n7=nlo[8] // 11:
(p48) cmp.ltu p51,p49=t[4],a5
(p50) cmp.leu p51,p49=t[4],a5 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mfi; (p17) getf.sig n8=nhi[8] // 12:
(p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i]
(p41) add a6=a6,n6 } // (p17) a6+=n6
{ .mfi; (p43) add a6=a6,n6,1
(p16) xma.lu alo[3]=ai3,bj[7],ahi[2]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig a2=alo[2] // 13:
(p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
(p51) add t[3]=t[3],a6,1 };;
{ .mfi; (p16) nop.m 0 // 14:
(p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0
(p41) cmp.ltu p42,p40=a6,n6 }
{ .mfi; (p43) cmp.leu p42,p40=a6,n6
(p16) xma.lu nlo[1]=ni1,mj[0],nhi[0]
(p16) nop.i 0 };;
{ .mii; (p16) nop.m 0 // 15:
(p49) cmp.ltu p50,p48=t[3],a6
(p51) cmp.leu p50,p48=t[3],a6 };;
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mfi; (p16) nop.m 0 // 16:
(p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i]
(p40) add a7=a7,n7 } // (p17) a7+=n7
{ .mfi; (p42) add a7=a7,n7,1
(p16) xma.lu alo[4]=ai4,bj[7],ahi[3]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig a3=alo[3] // 17:
(p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
(p50) add t[2]=t[2],a7,1 };;
{ .mfi; (p16) nop.m 0 // 18:
(p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0
(p40) cmp.ltu p43,p41=a7,n7 }
{ .mfi; (p42) cmp.leu p43,p41=a7,n7
(p16) xma.lu nlo[2]=ni2,mj[0],nhi[1]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig n1=nlo[1] // 19:
(p48) cmp.ltu p51,p49=t[2],a7
(p50) cmp.leu p51,p49=t[2],a7 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mfi; (p16) nop.m 0 // 20:
(p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i]
(p41) add a8=a8,n8 } // (p17) a8+=n8
{ .mfi; (p43) add a8=a8,n8,1
(p16) xma.lu alo[5]=ai5,bj[7],ahi[4]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig a4=alo[4] // 21:
(p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
(p51) add t[1]=t[1],a8,1 };;
{ .mfi; (p16) nop.m 0 // 22:
(p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0
(p41) cmp.ltu p42,p40=a8,n8 }
{ .mfi; (p43) cmp.leu p42,p40=a8,n8
(p16) xma.lu nlo[3]=ni3,mj[0],nhi[2]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig n2=nlo[2] // 23:
(p49) cmp.ltu p50,p48=t[1],a8
(p51) cmp.leu p50,p48=t[1],a8 };;
{ .mfi; (p16) nop.m 0 // 24:
(p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i]
(p16) add a1=a1,n1 } // (p16) a1+=n1
{ .mfi; (p16) nop.m 0
(p16) xma.lu alo[6]=ai6,bj[7],ahi[5]
(p17) mov t[0]=r0 };;
{ .mii; (p16) getf.sig a5=alo[5] // 25:
(p16) add t0=t[7],a1 // (p16) t[7]+=a1
(p42) add t[0]=t[0],r0,1 };;
{ .mfi; (p16) setf.sig tf[0]=t0 // 26:
(p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0
(p50) add t[0]=t[0],r0,1 }
{ .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1
(p16) xma.lu nlo[4]=ni4,mj[0],nhi[3]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig n3=nlo[3] // 27:
(p16) cmp.ltu.unc p50,p48=t0,a1
(p16) nop.i 0 };;
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mfi; (p16) nop.m 0 // 28:
(p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i]
(p40) add a2=a2,n2 } // (p16) a2+=n2
{ .mfi; (p42) add a2=a2,n2,1
(p16) xma.lu alo[7]=ai7,bj[7],ahi[6]
(p16) nop.i 0 };;
{ .mii; (p16) getf.sig a6=alo[6] // 29:
(p48) add t[6]=t[6],a2 // (p16) t[6]+=a2
(p50) add t[6]=t[6],a2,1 };;
{ .mfi; (p16) nop.m 0 // 30:
(p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0
(p40) cmp.ltu p41,p39=a2,n2 }
{ .mfi; (p42) cmp.leu p41,p39=a2,n2
(p16) xma.lu nlo[5]=ni5,mj[0],nhi[4]
(p16) nop.i 0 };;
{ .mfi; (p16) getf.sig n4=nlo[4] // 31:
(p16) nop.f 0
(p48) cmp.ltu p49,p47=t[6],a2 }
{ .mfb; (p50) cmp.leu p49,p47=t[6],a2
(p16) nop.f 0
br.ctop.sptk.many .Louter_8_ctop };;
.Louter_8_cend:
// above loop has to execute one more time, without (p16), which is
// replaced with merged move of np[8] to GPR bank
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mmi; (p0) getf.sig n1=ni0 // 0:
(p40) add a3=a3,n3 // (p17) a3+=n3
(p42) add a3=a3,n3,1 };;
{ .mii; (p17) getf.sig a7=alo[8] // 1:
(p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
(p50) add t[6]=t[6],a3,1 };;
{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
(p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
(p40) cmp.ltu p43,p41=a3,n3 }
{ .mfi; (p42) cmp.leu p43,p41=a3,n3
(p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
(p0) nop.i 0 };;
{ .mii; (p17) getf.sig n5=nlo[6] // 3:
(p48) cmp.ltu p51,p49=t[6],a3
(p50) cmp.leu p51,p49=t[6],a3 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mmi; (p0) getf.sig n2=ni1 // 4:
(p41) add a4=a4,n4 // (p17) a4+=n4
(p43) add a4=a4,n4,1 };;
{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
(p0) nop.f 0
(p51) add t[5]=t[5],a4,1 };;
{ .mfi; (p0) getf.sig n3=ni2 // 6:
(p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
(p41) cmp.ltu p42,p40=a4,n4 }
{ .mfi; (p43) cmp.leu p42,p40=a4,n4
(p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
(p0) nop.i 0 };;
{ .mii; (p17) getf.sig n6=nlo[7] // 7:
(p49) cmp.ltu p50,p48=t[5],a4
(p51) cmp.leu p50,p48=t[5],a4 };;
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mii; (p0) getf.sig n4=ni3 // 8:
(p40) add a5=a5,n5 // (p17) a5+=n5
(p42) add a5=a5,n5,1 };;
{ .mii; (p0) nop.m 0 // 9:
(p48) add t[4]=t[4],a5 // (p17) t[4]+=a5
(p50) add t[4]=t[4],a5,1 };;
{ .mii; (p0) nop.m 0 // 10:
(p40) cmp.ltu p43,p41=a5,n5
(p42) cmp.leu p43,p41=a5,n5 };;
{ .mii; (p17) getf.sig n7=nlo[8] // 11:
(p48) cmp.ltu p51,p49=t[4],a5
(p50) cmp.leu p51,p49=t[4],a5 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mii; (p17) getf.sig n8=nhi[8] // 12:
(p41) add a6=a6,n6 // (p17) a6+=n6
(p43) add a6=a6,n6,1 };;
{ .mii; (p0) getf.sig n5=ni4 // 13:
(p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
(p51) add t[3]=t[3],a6,1 };;
{ .mii; (p0) nop.m 0 // 14:
(p41) cmp.ltu p42,p40=a6,n6
(p43) cmp.leu p42,p40=a6,n6 };;
{ .mii; (p0) getf.sig n6=ni5 // 15:
(p49) cmp.ltu p50,p48=t[3],a6
(p51) cmp.leu p50,p48=t[3],a6 };;
.pred.rel "mutex",p40,p42
.pred.rel "mutex",p48,p50
{ .mii; (p0) nop.m 0 // 16:
(p40) add a7=a7,n7 // (p17) a7+=n7
(p42) add a7=a7,n7,1 };;
{ .mii; (p0) nop.m 0 // 17:
(p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
(p50) add t[2]=t[2],a7,1 };;
{ .mii; (p0) nop.m 0 // 18:
(p40) cmp.ltu p43,p41=a7,n7
(p42) cmp.leu p43,p41=a7,n7 };;
{ .mii; (p0) getf.sig n7=ni6 // 19:
(p48) cmp.ltu p51,p49=t[2],a7
(p50) cmp.leu p51,p49=t[2],a7 };;
.pred.rel "mutex",p41,p43
.pred.rel "mutex",p49,p51
{ .mii; (p0) nop.m 0 // 20:
(p41) add a8=a8,n8 // (p17) a8+=n8
(p43) add a8=a8,n8,1 };;
{ .mmi; (p0) nop.m 0 // 21:
(p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
(p51) add t[1]=t[1],a8,1 }
{ .mmi; (p17) mov t[0]=r0
(p41) cmp.ltu p42,p40=a8,n8
(p43) cmp.leu p42,p40=a8,n8 };;
{ .mmi; (p0) getf.sig n8=ni7 // 22:
(p49) cmp.ltu p50,p48=t[1],a8
(p51) cmp.leu p50,p48=t[1],a8 }
{ .mmi; (p42) add t[0]=t[0],r0,1
(p0) add r16=-7*16,prevsp
(p0) add r17=-6*16,prevsp };;
// subtract np[8] from carrybit|tmp[8]
// carrybit|tmp[8] layout upon exit from above loop is:
// t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
{ .mmi; (p50)add t[0]=t[0],r0,1
add r18=-5*16,prevsp
sub n1=t0,n1 };;
{ .mmi; cmp.gtu p34,p32=n1,t0;;
.pred.rel "mutex",p32,p34
(p32)sub n2=t[7],n2
(p34)sub n2=t[7],n2,1 };;
{ .mii; (p32)cmp.gtu p35,p33=n2,t[7]
(p34)cmp.geu p35,p33=n2,t[7];;
.pred.rel "mutex",p33,p35
(p33)sub n3=t[6],n3 }
{ .mmi; (p35)sub n3=t[6],n3,1;;
(p33)cmp.gtu p34,p32=n3,t[6]
(p35)cmp.geu p34,p32=n3,t[6] };;
.pred.rel "mutex",p32,p34
{ .mii; (p32)sub n4=t[5],n4
(p34)sub n4=t[5],n4,1;;
(p32)cmp.gtu p35,p33=n4,t[5] }
{ .mmi; (p34)cmp.geu p35,p33=n4,t[5];;
.pred.rel "mutex",p33,p35
(p33)sub n5=t[4],n5
(p35)sub n5=t[4],n5,1 };;
{ .mii; (p33)cmp.gtu p34,p32=n5,t[4]
(p35)cmp.geu p34,p32=n5,t[4];;
.pred.rel "mutex",p32,p34
(p32)sub n6=t[3],n6 }
{ .mmi; (p34)sub n6=t[3],n6,1;;
(p32)cmp.gtu p35,p33=n6,t[3]
(p34)cmp.geu p35,p33=n6,t[3] };;
.pred.rel "mutex",p33,p35
{ .mii; (p33)sub n7=t[2],n7
(p35)sub n7=t[2],n7,1;;
(p33)cmp.gtu p34,p32=n7,t[2] }
{ .mmi; (p35)cmp.geu p34,p32=n7,t[2];;
.pred.rel "mutex",p32,p34
(p32)sub n8=t[1],n8
(p34)sub n8=t[1],n8,1 };;
{ .mii; (p32)cmp.gtu p35,p33=n8,t[1]
(p34)cmp.geu p35,p33=n8,t[1];;
.pred.rel "mutex",p33,p35
(p33)sub a8=t[0],r0 }
{ .mmi; (p35)sub a8=t[0],r0,1;;
(p33)cmp.gtu p34,p32=a8,t[0]
(p35)cmp.geu p34,p32=a8,t[0] };;
// save the result, either tmp[num] or tmp[num]-np[num]
.pred.rel "mutex",p32,p34
{ .mmi; (p32)st8 [rptr]=n1,8
(p34)st8 [rptr]=t0,8
add r19=-4*16,prevsp};;
{ .mmb; (p32)st8 [rptr]=n2,8
(p34)st8 [rptr]=t[7],8
(p5)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n3,8
(p34)st8 [rptr]=t[6],8
(p7)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n4,8
(p34)st8 [rptr]=t[5],8
(p9)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n5,8
(p34)st8 [rptr]=t[4],8
(p11)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n6,8
(p34)st8 [rptr]=t[3],8
(p13)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n7,8
(p34)st8 [rptr]=t[2],8
(p15)br.cond.dpnt.few .Ldone };;
{ .mmb; (p32)st8 [rptr]=n8,8
(p34)st8 [rptr]=t[1],8
nop.b 0 };;
.Ldone: // epilogue
{ .mmi; ldf.fill f16=[r16],64
ldf.fill f17=[r17],64
nop.i 0 }
{ .mmi; ldf.fill f18=[r18],64
ldf.fill f19=[r19],64
mov pr=prevpr,0x1ffff };;
{ .mmi; ldf.fill f20=[r16]
ldf.fill f21=[r17]
mov ar.lc=prevlc }
{ .mmi; ldf.fill f22=[r18]
ldf.fill f23=[r19]
mov ret0=1 } // signal "handled"
{ .mib; rum 1<<5
.restore sp
mov sp=prevsp
br.ret.sptk.many b0 };;
.endp bn_mul_mont_8#
.type copyright#,\@object
copyright:
stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
___
$output=shift and open STDOUT,">$output";
print $code;
close STDOUT;