  1. #!/usr/bin/env perl
  2. #
  3. # Implemented as a Perl wrapper as we want to support several different
  4. # architectures with single file. We pick up the target based on the
  5. # file name we are asked to generate.
  6. #
  7. # It should be noted though that this perl code is nothing like
  8. # <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
  9. # as pre-processor to cover for platform differences in name decoration,
  10. # linker tables, 32-/64-bit instruction sets...
  11. #
  12. # As you might know, there are several PowerPC ABIs in use. Most notably,
  13. # Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
  14. # are similar enough that leaf(!) functions can be written to be ABI
  15. # neutral, and that's what you find here: ABI-neutral leaf functions.
  16. # In case you wonder what that is...
  17. #
  18. # AIX performance
  19. #
  20. # MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
  21. #
  22. # The following is the performance of 32-bit compiler
  23. # generated code:
  24. #
  25. # OpenSSL 0.9.6c 21 dec 2001
  26. # built on: Tue Jun 11 11:06:51 EDT 2002
  27. # options:bn(64,32) ...
  28. #compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
  29. # sign verify sign/s verify/s
  30. #rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
  31. #rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
  32. #rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
  33. #rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
  34. #dsa 512 bits 0.0087s 0.0106s 114.3 94.5
  35. #dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
  36. #
  37. # Same benchmark with this assembler code:
  38. #
  39. #rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
  40. #rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
  41. #rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
  42. #rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
  43. #dsa 512 bits 0.0052s 0.0062s 191.6 162.0
  44. #dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
  45. #
  46. # Number of operations increases by almost 75%
  47. #
  48. # Here are performance numbers for 64-bit compiler
  49. # generated code:
  50. #
  51. # OpenSSL 0.9.6g [engine] 9 Aug 2002
  52. # built on: Fri Apr 18 16:59:20 EDT 2003
  53. # options:bn(64,64) ...
  54. # compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
  55. # sign verify sign/s verify/s
  56. #rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
  57. #rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
  58. #rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
  59. #rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
  60. #dsa 512 bits 0.0026s 0.0032s 382.5 313.7
  61. #dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
  62. #
  63. # Same benchmark with this assembler code:
  64. #
  65. #rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
  66. #rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
  67. #rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
  68. #rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
  69. #dsa 512 bits 0.0016s 0.0020s 610.7 507.1
  70. #dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
  71. #
  72. # Again, performance increases by about 75%
  73. #
  74. # Mac OS X, Apple G5 1.8 GHz (Note: this is 32-bit code)
  75. # OpenSSL 0.9.7c 30 Sep 2003
  76. #
  77. # Original code.
  78. #
  79. #rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
  80. #rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
  81. #rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
  82. #rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
  83. #dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
  84. #dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
  85. #dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
  86. #
  87. # Same benchmark with this assembler code:
  88. #
  89. #rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
  90. #rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
  91. #rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
  92. #rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
  93. #dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
  94. #dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
  95. #dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
  96. #
  97. # Performance increase of ~60%
  98. #
  99. # If you have comments or suggestions to improve this code, send
  100. # me a note at schari@us.ibm.com
  101. #
  102. $flavour = shift;
  103. if ($flavour =~ /32/) {
  104. $BITS= 32;
  105. $BNSZ= $BITS/8;
  106. $ISA= "\"ppc\"";
  107. $LD= "lwz"; # load
  108. $LDU= "lwzu"; # load and update
  109. $ST= "stw"; # store
  110. $STU= "stwu"; # store and update
  111. $UMULL= "mullw"; # unsigned multiply low
  112. $UMULH= "mulhwu"; # unsigned multiply high
  113. $UDIV= "divwu"; # unsigned divide
  114. $UCMPI= "cmplwi"; # unsigned compare with immediate
  115. $UCMP= "cmplw"; # unsigned compare
  116. $CNTLZ= "cntlzw"; # count leading zeros
  117. $SHL= "slw"; # shift left
  118. $SHR= "srw"; # unsigned shift right
  119. $SHRI= "srwi"; # unsigned shift right by immediate
  120. $SHLI= "slwi"; # shift left by immediate
  121. $CLRU= "clrlwi"; # clear upper bits
  122. $INSR= "insrwi"; # insert right
  123. $ROTL= "rotlwi"; # rotate left by immediate
  124. $TR= "tw"; # conditional trap
  125. } elsif ($flavour =~ /64/) {
  126. $BITS= 64;
  127. $BNSZ= $BITS/8;
  128. $ISA= "\"ppc64\"";
  129. # same as above, but 64-bit mnemonics...
  130. $LD= "ld"; # load
  131. $LDU= "ldu"; # load and update
  132. $ST= "std"; # store
  133. $STU= "stdu"; # store and update
  134. $UMULL= "mulld"; # unsigned multiply low
  135. $UMULH= "mulhdu"; # unsigned multiply high
  136. $UDIV= "divdu"; # unsigned divide
  137. $UCMPI= "cmpldi"; # unsigned compare with immediate
  138. $UCMP= "cmpld"; # unsigned compare
  139. $CNTLZ= "cntlzd"; # count leading zeros
  140. $SHL= "sld"; # shift left
  141. $SHR= "srd"; # unsigned shift right
  142. $SHRI= "srdi"; # unsigned shift right by immediate
  143. $SHLI= "sldi"; # shift left by immediate
  144. $CLRU= "clrldi"; # clear upper bits
  145. $INSR= "insrdi"; # insert right
  146. $ROTL= "rotldi"; # rotate left by immediate
  147. $TR= "td"; # conditional trap
  148. } else { die "nonsense $flavour"; }
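  # Illustrative invocation (hypothetical values; the flavour string and the
  # output file name are normally supplied by the OpenSSL build system):
  #
  #     perl ppc.pl linux32 ppc32.s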
  149. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  150. ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
  151. ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
  152. die "can't locate ppc-xlate.pl";
  153. open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
  154. $data=<<EOF;
  155. #--------------------------------------------------------------------
  156. #
  157. #
  158. #
  159. #
  160. # File: ppc32.s
  161. #
  162. # Created by: Suresh Chari
  163. # IBM Thomas J. Watson Research Library
  164. # Hawthorne, NY
  165. #
  166. #
  167. # Description: Optimized assembly routines for OpenSSL crypto
  168. # on the 32-bit PowerPC platform.
  169. #
  170. #
  171. # Version History
  172. #
  173. # 2. Fixed bn_add,bn_sub and bn_div_words, added comments,
  174. # cleaned up code. Also made a single version which can
  175. # be used for both the AIX and Linux compilers. See NOTE
  176. # below.
  177. # 12/05/03 Suresh Chari
  178. # (with lots of help from) Andy Polyakov
  179. ##
  180. # 1. Initial version 10/20/02 Suresh Chari
  181. #
  182. #
  183. # The following file works with the xlc, cc
  184. # and gcc compilers.
  185. #
  186. # NOTE: To get the file to link correctly with the gcc compiler
  187. # you have to change the names of the routines and remove
  188. # the first .(dot) character. This should automatically
  189. # be done in the build process.
  190. #
  191. # Hand optimized assembly code for the following routines
  192. #
  193. # bn_sqr_comba4
  194. # bn_sqr_comba8
  195. # bn_mul_comba4
  196. # bn_mul_comba8
  197. # bn_sub_words
  198. # bn_add_words
  199. # bn_div_words
  200. # bn_sqr_words
  201. # bn_mul_words
  202. # bn_mul_add_words
  203. #
  204. # NOTE: It is possible to optimize this code more for
  205. # specific PowerPC or Power architectures. On the Northstar
  206. # architecture the optimizations in this file do
  207. # NOT provide much improvement.
  208. #
  209. # If you have comments or suggestions to improve this code, send
  210. # me a note at schari\@us.ibm.com
  211. #
  212. #--------------------------------------------------------------------------
  213. #
  214. # Defines to be used in the assembly code.
  215. #
  216. #.set r0,0 # we use it as storage for value of 0
  217. #.set SP,1 # preserved
  218. #.set RTOC,2 # preserved
  219. #.set r3,3 # 1st argument/return value
  220. #.set r4,4 # 2nd argument/volatile register
  221. #.set r5,5 # 3rd argument/volatile register
  222. #.set r6,6 # ...
  223. #.set r7,7
  224. #.set r8,8
  225. #.set r9,9
  226. #.set r10,10
  227. #.set r11,11
  228. #.set r12,12
  229. #.set r13,13 # not used, nor any other "below" it...
  230. # Declare function names to be global
  231. # NOTE: For gcc these names MUST be changed to remove
  232. # the first . i.e. for example change ".bn_sqr_comba4"
  233. # to "bn_sqr_comba4". This should be automatically done
  234. # in the build.
  235. .globl .bn_sqr_comba4
  236. .globl .bn_sqr_comba8
  237. .globl .bn_mul_comba4
  238. .globl .bn_mul_comba8
  239. .globl .bn_sub_words
  240. .globl .bn_add_words
  241. .globl .bn_div_words
  242. .globl .bn_sqr_words
  243. .globl .bn_mul_words
  244. .globl .bn_mul_add_words
  245. # .text section
  246. .machine "any"
  247. #
  248. # NOTE: The following label name should be changed to
  249. # "bn_sqr_comba4" i.e. remove the first dot
  250. # for the gcc compiler. This should be automatically
  251. # done in the build
  252. #
  253. .align 4
  254. .bn_sqr_comba4:
  255. #
  256. # Optimized version of bn_sqr_comba4.
  257. #
  258. # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
  259. # r3 contains r
  260. # r4 contains a
  261. #
  262. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  263. #
  264. # r5,r6 are the two BN_ULONGs being multiplied.
  265. # r7,r8 are the results of the 32x32 giving 64 bit multiply.
  266. # r9,r10, r11 are the equivalents of c1,c2, c3.
  267. # Here's the assembly
  268. #
  269. #
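  # For orientation, a rough sketch of the comba helpers this code mirrors
  # (pseudo-C; the names follow OpenSSL's generic macros, not copied verbatim):
  #
  #   sqr_add_c(a,i,c1,c2,c3):    (c3,c2,c1) += a[i]*a[i]
  #   sqr_add_c2(a,i,j,c1,c2,c3): (c3,c2,c1) += 2*a[i]*a[j]
  #
  # where (c3,c2,c1) is a three-word accumulator with c1 least significant;
  # after each column the finished low word is stored and the roles rotate.
  #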
  270. xor r0,r0,r0 # set r0 = 0. Used in the addze
  271. # instructions below
  272. #sqr_add_c(a,0,c1,c2,c3)
  273. $LD r5,`0*$BNSZ`(r4)
  274. $UMULL r9,r5,r5
  275. $UMULH r10,r5,r5 #in first iteration. No need
  276. #to add since c1=c2=c3=0.
  277. # Note c3(r11) is NOT set to 0
  278. # but will be.
  279. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  280. # sqr_add_c2(a,1,0,c2,c3,c1);
  281. $LD r6,`1*$BNSZ`(r4)
  282. $UMULL r7,r5,r6
  283. $UMULH r8,r5,r6
  284. addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
  285. adde r8,r8,r8
  286. addze r9,r0 # catch carry if any.
  287. # r9= r0(=0) and carry
  288. addc r10,r7,r10 # now add to temp result.
  289. addze r11,r8 # r8 added to r11 which is 0
  290. addze r9,r9
  291. $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
  292. #sqr_add_c(a,1,c3,c1,c2)
  293. $UMULL r7,r6,r6
  294. $UMULH r8,r6,r6
  295. addc r11,r7,r11
  296. adde r9,r8,r9
  297. addze r10,r0
  298. #sqr_add_c2(a,2,0,c3,c1,c2)
  299. $LD r6,`2*$BNSZ`(r4)
  300. $UMULL r7,r5,r6
  301. $UMULH r8,r5,r6
  302. addc r7,r7,r7
  303. adde r8,r8,r8
  304. addze r10,r10
  305. addc r11,r7,r11
  306. adde r9,r8,r9
  307. addze r10,r10
  308. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  309. #sqr_add_c2(a,3,0,c1,c2,c3);
  310. $LD r6,`3*$BNSZ`(r4)
  311. $UMULL r7,r5,r6
  312. $UMULH r8,r5,r6
  313. addc r7,r7,r7
  314. adde r8,r8,r8
  315. addze r11,r0
  316. addc r9,r7,r9
  317. adde r10,r8,r10
  318. addze r11,r11
  319. #sqr_add_c2(a,2,1,c1,c2,c3);
  320. $LD r5,`1*$BNSZ`(r4)
  321. $LD r6,`2*$BNSZ`(r4)
  322. $UMULL r7,r5,r6
  323. $UMULH r8,r5,r6
  324. addc r7,r7,r7
  325. adde r8,r8,r8
  326. addze r11,r11
  327. addc r9,r7,r9
  328. adde r10,r8,r10
  329. addze r11,r11
  330. $ST r9,`3*$BNSZ`(r3) #r[3]=c1
  331. #sqr_add_c(a,2,c2,c3,c1);
  332. $UMULL r7,r6,r6
  333. $UMULH r8,r6,r6
  334. addc r10,r7,r10
  335. adde r11,r8,r11
  336. addze r9,r0
  337. #sqr_add_c2(a,3,1,c2,c3,c1);
  338. $LD r6,`3*$BNSZ`(r4)
  339. $UMULL r7,r5,r6
  340. $UMULH r8,r5,r6
  341. addc r7,r7,r7
  342. adde r8,r8,r8
  343. addze r9,r9
  344. addc r10,r7,r10
  345. adde r11,r8,r11
  346. addze r9,r9
  347. $ST r10,`4*$BNSZ`(r3) #r[4]=c2
  348. #sqr_add_c2(a,3,2,c3,c1,c2);
  349. $LD r5,`2*$BNSZ`(r4)
  350. $UMULL r7,r5,r6
  351. $UMULH r8,r5,r6
  352. addc r7,r7,r7
  353. adde r8,r8,r8
  354. addze r10,r0
  355. addc r11,r7,r11
  356. adde r9,r8,r9
  357. addze r10,r10
  358. $ST r11,`5*$BNSZ`(r3) #r[5] = c3
  359. #sqr_add_c(a,3,c1,c2,c3);
  360. $UMULL r7,r6,r6
  361. $UMULH r8,r6,r6
  362. addc r9,r7,r9
  363. adde r10,r8,r10
  364. $ST r9,`6*$BNSZ`(r3) #r[6]=c1
  365. $ST r10,`7*$BNSZ`(r3) #r[7]=c2
  366. blr
  367. .long 0
  368. .byte 0,12,0x14,0,0,0,2,0
  369. .long 0
  370. .size .bn_sqr_comba4,.-.bn_sqr_comba4
  371. #
  372. # NOTE: The following label name should be changed to
  373. # "bn_sqr_comba8" i.e. remove the first dot
  374. # for the gcc compiler. This should be automatically
  375. # done in the build
  376. #
  377. .align 4
  378. .bn_sqr_comba8:
  379. #
  380. # This is an optimized version of the bn_sqr_comba8 routine.
  381. # It makes heavy use of the adde instruction.
  382. #
  383. #
  384. # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
  385. # r3 contains r
  386. # r4 contains a
  387. #
  388. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  389. #
  390. # r5,r6 are the two BN_ULONGs being multiplied.
  391. # r7,r8 are the results of the 32x32 giving 64 bit multiply.
  392. # r9,r10, r11 are the equivalents of c1,c2, c3.
  393. #
  394. # A possible optimization of loading all 8 longs of a into registers
  395. # doesn't provide any speedup.
  396. #
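  # The schedule is the same column-by-column comba squaring as in
  # bn_sqr_comba4 above, extended to 8 input words and 16 result words.
  #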
  397. xor r0,r0,r0 #set r0 = 0.Used in addze
  398. #instructions below.
  399. #sqr_add_c(a,0,c1,c2,c3);
  400. $LD r5,`0*$BNSZ`(r4)
  401. $UMULL r9,r5,r5 #1st iteration: no carries.
  402. $UMULH r10,r5,r5
  403. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  404. #sqr_add_c2(a,1,0,c2,c3,c1);
  405. $LD r6,`1*$BNSZ`(r4)
  406. $UMULL r7,r5,r6
  407. $UMULH r8,r5,r6
  408. addc r10,r7,r10 #add the two register number
  409. adde r11,r8,r0 # (r8,r7) to the three register
  410. addze r9,r0 # number (r9,r11,r10). NOTE: r0=0
  411. addc r10,r7,r10 #add the two register number
  412. adde r11,r8,r11 # (r8,r7) to the three register
  413. addze r9,r9 # number (r9,r11,r10).
  414. $ST r10,`1*$BNSZ`(r3) # r[1]=c2
  415. #sqr_add_c(a,1,c3,c1,c2);
  416. $UMULL r7,r6,r6
  417. $UMULH r8,r6,r6
  418. addc r11,r7,r11
  419. adde r9,r8,r9
  420. addze r10,r0
  421. #sqr_add_c2(a,2,0,c3,c1,c2);
  422. $LD r6,`2*$BNSZ`(r4)
  423. $UMULL r7,r5,r6
  424. $UMULH r8,r5,r6
  425. addc r11,r7,r11
  426. adde r9,r8,r9
  427. addze r10,r10
  428. addc r11,r7,r11
  429. adde r9,r8,r9
  430. addze r10,r10
  431. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  432. #sqr_add_c2(a,3,0,c1,c2,c3);
  433. $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
  434. $UMULL r7,r5,r6
  435. $UMULH r8,r5,r6
  436. addc r9,r7,r9
  437. adde r10,r8,r10
  438. addze r11,r0
  439. addc r9,r7,r9
  440. adde r10,r8,r10
  441. addze r11,r11
  442. #sqr_add_c2(a,2,1,c1,c2,c3);
  443. $LD r5,`1*$BNSZ`(r4)
  444. $LD r6,`2*$BNSZ`(r4)
  445. $UMULL r7,r5,r6
  446. $UMULH r8,r5,r6
  447. addc r9,r7,r9
  448. adde r10,r8,r10
  449. addze r11,r11
  450. addc r9,r7,r9
  451. adde r10,r8,r10
  452. addze r11,r11
  453. $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
  454. #sqr_add_c(a,2,c2,c3,c1);
  455. $UMULL r7,r6,r6
  456. $UMULH r8,r6,r6
  457. addc r10,r7,r10
  458. adde r11,r8,r11
  459. addze r9,r0
  460. #sqr_add_c2(a,3,1,c2,c3,c1);
  461. $LD r6,`3*$BNSZ`(r4)
  462. $UMULL r7,r5,r6
  463. $UMULH r8,r5,r6
  464. addc r10,r7,r10
  465. adde r11,r8,r11
  466. addze r9,r9
  467. addc r10,r7,r10
  468. adde r11,r8,r11
  469. addze r9,r9
  470. #sqr_add_c2(a,4,0,c2,c3,c1);
  471. $LD r5,`0*$BNSZ`(r4)
  472. $LD r6,`4*$BNSZ`(r4)
  473. $UMULL r7,r5,r6
  474. $UMULH r8,r5,r6
  475. addc r10,r7,r10
  476. adde r11,r8,r11
  477. addze r9,r9
  478. addc r10,r7,r10
  479. adde r11,r8,r11
  480. addze r9,r9
  481. $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
  482. #sqr_add_c2(a,5,0,c3,c1,c2);
  483. $LD r6,`5*$BNSZ`(r4)
  484. $UMULL r7,r5,r6
  485. $UMULH r8,r5,r6
  486. addc r11,r7,r11
  487. adde r9,r8,r9
  488. addze r10,r0
  489. addc r11,r7,r11
  490. adde r9,r8,r9
  491. addze r10,r10
  492. #sqr_add_c2(a,4,1,c3,c1,c2);
  493. $LD r5,`1*$BNSZ`(r4)
  494. $LD r6,`4*$BNSZ`(r4)
  495. $UMULL r7,r5,r6
  496. $UMULH r8,r5,r6
  497. addc r11,r7,r11
  498. adde r9,r8,r9
  499. addze r10,r10
  500. addc r11,r7,r11
  501. adde r9,r8,r9
  502. addze r10,r10
  503. #sqr_add_c2(a,3,2,c3,c1,c2);
  504. $LD r5,`2*$BNSZ`(r4)
  505. $LD r6,`3*$BNSZ`(r4)
  506. $UMULL r7,r5,r6
  507. $UMULH r8,r5,r6
  508. addc r11,r7,r11
  509. adde r9,r8,r9
  510. addze r10,r10
  511. addc r11,r7,r11
  512. adde r9,r8,r9
  513. addze r10,r10
  514. $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
  515. #sqr_add_c(a,3,c1,c2,c3);
  516. $UMULL r7,r6,r6
  517. $UMULH r8,r6,r6
  518. addc r9,r7,r9
  519. adde r10,r8,r10
  520. addze r11,r0
  521. #sqr_add_c2(a,4,2,c1,c2,c3);
  522. $LD r6,`4*$BNSZ`(r4)
  523. $UMULL r7,r5,r6
  524. $UMULH r8,r5,r6
  525. addc r9,r7,r9
  526. adde r10,r8,r10
  527. addze r11,r11
  528. addc r9,r7,r9
  529. adde r10,r8,r10
  530. addze r11,r11
  531. #sqr_add_c2(a,5,1,c1,c2,c3);
  532. $LD r5,`1*$BNSZ`(r4)
  533. $LD r6,`5*$BNSZ`(r4)
  534. $UMULL r7,r5,r6
  535. $UMULH r8,r5,r6
  536. addc r9,r7,r9
  537. adde r10,r8,r10
  538. addze r11,r11
  539. addc r9,r7,r9
  540. adde r10,r8,r10
  541. addze r11,r11
  542. #sqr_add_c2(a,6,0,c1,c2,c3);
  543. $LD r5,`0*$BNSZ`(r4)
  544. $LD r6,`6*$BNSZ`(r4)
  545. $UMULL r7,r5,r6
  546. $UMULH r8,r5,r6
  547. addc r9,r7,r9
  548. adde r10,r8,r10
  549. addze r11,r11
  550. addc r9,r7,r9
  551. adde r10,r8,r10
  552. addze r11,r11
  553. $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
  554. #sqr_add_c2(a,7,0,c2,c3,c1);
  555. $LD r6,`7*$BNSZ`(r4)
  556. $UMULL r7,r5,r6
  557. $UMULH r8,r5,r6
  558. addc r10,r7,r10
  559. adde r11,r8,r11
  560. addze r9,r0
  561. addc r10,r7,r10
  562. adde r11,r8,r11
  563. addze r9,r9
  564. #sqr_add_c2(a,6,1,c2,c3,c1);
  565. $LD r5,`1*$BNSZ`(r4)
  566. $LD r6,`6*$BNSZ`(r4)
  567. $UMULL r7,r5,r6
  568. $UMULH r8,r5,r6
  569. addc r10,r7,r10
  570. adde r11,r8,r11
  571. addze r9,r9
  572. addc r10,r7,r10
  573. adde r11,r8,r11
  574. addze r9,r9
  575. #sqr_add_c2(a,5,2,c2,c3,c1);
  576. $LD r5,`2*$BNSZ`(r4)
  577. $LD r6,`5*$BNSZ`(r4)
  578. $UMULL r7,r5,r6
  579. $UMULH r8,r5,r6
  580. addc r10,r7,r10
  581. adde r11,r8,r11
  582. addze r9,r9
  583. addc r10,r7,r10
  584. adde r11,r8,r11
  585. addze r9,r9
  586. #sqr_add_c2(a,4,3,c2,c3,c1);
  587. $LD r5,`3*$BNSZ`(r4)
  588. $LD r6,`4*$BNSZ`(r4)
  589. $UMULL r7,r5,r6
  590. $UMULH r8,r5,r6
  591. addc r10,r7,r10
  592. adde r11,r8,r11
  593. addze r9,r9
  594. addc r10,r7,r10
  595. adde r11,r8,r11
  596. addze r9,r9
  597. $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
  598. #sqr_add_c(a,4,c3,c1,c2);
  599. $UMULL r7,r6,r6
  600. $UMULH r8,r6,r6
  601. addc r11,r7,r11
  602. adde r9,r8,r9
  603. addze r10,r0
  604. #sqr_add_c2(a,5,3,c3,c1,c2);
  605. $LD r6,`5*$BNSZ`(r4)
  606. $UMULL r7,r5,r6
  607. $UMULH r8,r5,r6
  608. addc r11,r7,r11
  609. adde r9,r8,r9
  610. addze r10,r10
  611. addc r11,r7,r11
  612. adde r9,r8,r9
  613. addze r10,r10
  614. #sqr_add_c2(a,6,2,c3,c1,c2);
  615. $LD r5,`2*$BNSZ`(r4)
  616. $LD r6,`6*$BNSZ`(r4)
  617. $UMULL r7,r5,r6
  618. $UMULH r8,r5,r6
  619. addc r11,r7,r11
  620. adde r9,r8,r9
  621. addze r10,r10
  622. addc r11,r7,r11
  623. adde r9,r8,r9
  624. addze r10,r10
  625. #sqr_add_c2(a,7,1,c3,c1,c2);
  626. $LD r5,`1*$BNSZ`(r4)
  627. $LD r6,`7*$BNSZ`(r4)
  628. $UMULL r7,r5,r6
  629. $UMULH r8,r5,r6
  630. addc r11,r7,r11
  631. adde r9,r8,r9
  632. addze r10,r10
  633. addc r11,r7,r11
  634. adde r9,r8,r9
  635. addze r10,r10
  636. $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
  637. #sqr_add_c2(a,7,2,c1,c2,c3);
  638. $LD r5,`2*$BNSZ`(r4)
  639. $UMULL r7,r5,r6
  640. $UMULH r8,r5,r6
  641. addc r9,r7,r9
  642. adde r10,r8,r10
  643. addze r11,r0
  644. addc r9,r7,r9
  645. adde r10,r8,r10
  646. addze r11,r11
  647. #sqr_add_c2(a,6,3,c1,c2,c3);
  648. $LD r5,`3*$BNSZ`(r4)
  649. $LD r6,`6*$BNSZ`(r4)
  650. $UMULL r7,r5,r6
  651. $UMULH r8,r5,r6
  652. addc r9,r7,r9
  653. adde r10,r8,r10
  654. addze r11,r11
  655. addc r9,r7,r9
  656. adde r10,r8,r10
  657. addze r11,r11
  658. #sqr_add_c2(a,5,4,c1,c2,c3);
  659. $LD r5,`4*$BNSZ`(r4)
  660. $LD r6,`5*$BNSZ`(r4)
  661. $UMULL r7,r5,r6
  662. $UMULH r8,r5,r6
  663. addc r9,r7,r9
  664. adde r10,r8,r10
  665. addze r11,r11
  666. addc r9,r7,r9
  667. adde r10,r8,r10
  668. addze r11,r11
  669. $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
  670. #sqr_add_c(a,5,c2,c3,c1);
  671. $UMULL r7,r6,r6
  672. $UMULH r8,r6,r6
  673. addc r10,r7,r10
  674. adde r11,r8,r11
  675. addze r9,r0
  676. #sqr_add_c2(a,6,4,c2,c3,c1);
  677. $LD r6,`6*$BNSZ`(r4)
  678. $UMULL r7,r5,r6
  679. $UMULH r8,r5,r6
  680. addc r10,r7,r10
  681. adde r11,r8,r11
  682. addze r9,r9
  683. addc r10,r7,r10
  684. adde r11,r8,r11
  685. addze r9,r9
  686. #sqr_add_c2(a,7,3,c2,c3,c1);
  687. $LD r5,`3*$BNSZ`(r4)
  688. $LD r6,`7*$BNSZ`(r4)
  689. $UMULL r7,r5,r6
  690. $UMULH r8,r5,r6
  691. addc r10,r7,r10
  692. adde r11,r8,r11
  693. addze r9,r9
  694. addc r10,r7,r10
  695. adde r11,r8,r11
  696. addze r9,r9
  697. $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
  698. #sqr_add_c2(a,7,4,c3,c1,c2);
  699. $LD r5,`4*$BNSZ`(r4)
  700. $UMULL r7,r5,r6
  701. $UMULH r8,r5,r6
  702. addc r11,r7,r11
  703. adde r9,r8,r9
  704. addze r10,r0
  705. addc r11,r7,r11
  706. adde r9,r8,r9
  707. addze r10,r10
  708. #sqr_add_c2(a,6,5,c3,c1,c2);
  709. $LD r5,`5*$BNSZ`(r4)
  710. $LD r6,`6*$BNSZ`(r4)
  711. $UMULL r7,r5,r6
  712. $UMULH r8,r5,r6
  713. addc r11,r7,r11
  714. adde r9,r8,r9
  715. addze r10,r10
  716. addc r11,r7,r11
  717. adde r9,r8,r9
  718. addze r10,r10
  719. $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
  720. #sqr_add_c(a,6,c1,c2,c3);
  721. $UMULL r7,r6,r6
  722. $UMULH r8,r6,r6
  723. addc r9,r7,r9
  724. adde r10,r8,r10
  725. addze r11,r0
  726. #sqr_add_c2(a,7,5,c1,c2,c3)
  727. $LD r6,`7*$BNSZ`(r4)
  728. $UMULL r7,r5,r6
  729. $UMULH r8,r5,r6
  730. addc r9,r7,r9
  731. adde r10,r8,r10
  732. addze r11,r11
  733. addc r9,r7,r9
  734. adde r10,r8,r10
  735. addze r11,r11
  736. $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
  737. #sqr_add_c2(a,7,6,c2,c3,c1)
  738. $LD r5,`6*$BNSZ`(r4)
  739. $UMULL r7,r5,r6
  740. $UMULH r8,r5,r6
  741. addc r10,r7,r10
  742. adde r11,r8,r11
  743. addze r9,r0
  744. addc r10,r7,r10
  745. adde r11,r8,r11
  746. addze r9,r9
  747. $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
  748. #sqr_add_c(a,7,c3,c1,c2);
  749. $UMULL r7,r6,r6
  750. $UMULH r8,r6,r6
  751. addc r11,r7,r11
  752. adde r9,r8,r9
  753. $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
  754. $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
  755. blr
  756. .long 0
  757. .byte 0,12,0x14,0,0,0,2,0
  758. .long 0
  759. .size .bn_sqr_comba8,.-.bn_sqr_comba8
  760. #
  761. # NOTE: The following label name should be changed to
  762. # "bn_mul_comba4" i.e. remove the first dot
  763. # for the gcc compiler. This should be automatically
  764. # done in the build
  765. #
  766. .align 4
  767. .bn_mul_comba4:
  768. #
  769. # This is an optimized version of the bn_mul_comba4 routine.
  770. #
  771. # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  772. # r3 contains r
  773. # r4 contains a
  774. # r5 contains b
  775. # r6, r7 are the 2 BN_ULONGs being multiplied.
  776. # r8, r9 are the results of the 32x32 giving 64 multiply.
  777. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  778. #
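  # Comba multiplication produces one result word per column: r[k] takes the
  # low word of the sum over i+j==k of a[i]*b[j] (plus incoming carries),
  # while the carries propagate into the next two columns; the roles of
  # c1, c2 and c3 rotate as the column index advances.
  #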
  779. xor r0,r0,r0 #r0=0. Used in addze below.
  780. #mul_add_c(a[0],b[0],c1,c2,c3);
  781. $LD r6,`0*$BNSZ`(r4)
  782. $LD r7,`0*$BNSZ`(r5)
  783. $UMULL r10,r6,r7
  784. $UMULH r11,r6,r7
  785. $ST r10,`0*$BNSZ`(r3) #r[0]=c1
  786. #mul_add_c(a[0],b[1],c2,c3,c1);
  787. $LD r7,`1*$BNSZ`(r5)
  788. $UMULL r8,r6,r7
  789. $UMULH r9,r6,r7
  790. addc r11,r8,r11
  791. adde r12,r9,r0
  792. addze r10,r0
  793. #mul_add_c(a[1],b[0],c2,c3,c1);
  794. $LD r6, `1*$BNSZ`(r4)
  795. $LD r7, `0*$BNSZ`(r5)
  796. $UMULL r8,r6,r7
  797. $UMULH r9,r6,r7
  798. addc r11,r8,r11
  799. adde r12,r9,r12
  800. addze r10,r10
  801. $ST r11,`1*$BNSZ`(r3) #r[1]=c2
  802. #mul_add_c(a[2],b[0],c3,c1,c2);
  803. $LD r6,`2*$BNSZ`(r4)
  804. $UMULL r8,r6,r7
  805. $UMULH r9,r6,r7
  806. addc r12,r8,r12
  807. adde r10,r9,r10
  808. addze r11,r0
  809. #mul_add_c(a[1],b[1],c3,c1,c2);
  810. $LD r6,`1*$BNSZ`(r4)
  811. $LD r7,`1*$BNSZ`(r5)
  812. $UMULL r8,r6,r7
  813. $UMULH r9,r6,r7
  814. addc r12,r8,r12
  815. adde r10,r9,r10
  816. addze r11,r11
  817. #mul_add_c(a[0],b[2],c3,c1,c2);
  818. $LD r6,`0*$BNSZ`(r4)
  819. $LD r7,`2*$BNSZ`(r5)
  820. $UMULL r8,r6,r7
  821. $UMULH r9,r6,r7
  822. addc r12,r8,r12
  823. adde r10,r9,r10
  824. addze r11,r11
  825. $ST r12,`2*$BNSZ`(r3) #r[2]=c3
  826. #mul_add_c(a[0],b[3],c1,c2,c3);
  827. $LD r7,`3*$BNSZ`(r5)
  828. $UMULL r8,r6,r7
  829. $UMULH r9,r6,r7
  830. addc r10,r8,r10
  831. adde r11,r9,r11
  832. addze r12,r0
  833. #mul_add_c(a[1],b[2],c1,c2,c3);
  834. $LD r6,`1*$BNSZ`(r4)
  835. $LD r7,`2*$BNSZ`(r5)
  836. $UMULL r8,r6,r7
  837. $UMULH r9,r6,r7
  838. addc r10,r8,r10
  839. adde r11,r9,r11
  840. addze r12,r12
  841. #mul_add_c(a[2],b[1],c1,c2,c3);
  842. $LD r6,`2*$BNSZ`(r4)
  843. $LD r7,`1*$BNSZ`(r5)
  844. $UMULL r8,r6,r7
  845. $UMULH r9,r6,r7
  846. addc r10,r8,r10
  847. adde r11,r9,r11
  848. addze r12,r12
  849. #mul_add_c(a[3],b[0],c1,c2,c3);
  850. $LD r6,`3*$BNSZ`(r4)
  851. $LD r7,`0*$BNSZ`(r5)
  852. $UMULL r8,r6,r7
  853. $UMULH r9,r6,r7
  854. addc r10,r8,r10
  855. adde r11,r9,r11
  856. addze r12,r12
  857. $ST r10,`3*$BNSZ`(r3) #r[3]=c1
  858. #mul_add_c(a[3],b[1],c2,c3,c1);
  859. $LD r7,`1*$BNSZ`(r5)
  860. $UMULL r8,r6,r7
  861. $UMULH r9,r6,r7
  862. addc r11,r8,r11
  863. adde r12,r9,r12
  864. addze r10,r0
  865. #mul_add_c(a[2],b[2],c2,c3,c1);
  866. $LD r6,`2*$BNSZ`(r4)
  867. $LD r7,`2*$BNSZ`(r5)
  868. $UMULL r8,r6,r7
  869. $UMULH r9,r6,r7
  870. addc r11,r8,r11
  871. adde r12,r9,r12
  872. addze r10,r10
  873. #mul_add_c(a[1],b[3],c2,c3,c1);
  874. $LD r6,`1*$BNSZ`(r4)
  875. $LD r7,`3*$BNSZ`(r5)
  876. $UMULL r8,r6,r7
  877. $UMULH r9,r6,r7
  878. addc r11,r8,r11
  879. adde r12,r9,r12
  880. addze r10,r10
  881. $ST r11,`4*$BNSZ`(r3) #r[4]=c2
  882. #mul_add_c(a[2],b[3],c3,c1,c2);
  883. $LD r6,`2*$BNSZ`(r4)
  884. $UMULL r8,r6,r7
  885. $UMULH r9,r6,r7
  886. addc r12,r8,r12
  887. adde r10,r9,r10
  888. addze r11,r0
  889. #mul_add_c(a[3],b[2],c3,c1,c2);
  890. $LD r6,`3*$BNSZ`(r4)
  891. $LD r7,`2*$BNSZ`(r5)
  892. $UMULL r8,r6,r7
  893. $UMULH r9,r6,r7
  894. addc r12,r8,r12
  895. adde r10,r9,r10
  896. addze r11,r11
  897. $ST r12,`5*$BNSZ`(r3) #r[5]=c3
  898. #mul_add_c(a[3],b[3],c1,c2,c3);
  899. $LD r7,`3*$BNSZ`(r5)
  900. $UMULL r8,r6,r7
  901. $UMULH r9,r6,r7
  902. addc r10,r8,r10
  903. adde r11,r9,r11
  904. $ST r10,`6*$BNSZ`(r3) #r[6]=c1
  905. $ST r11,`7*$BNSZ`(r3) #r[7]=c2
  906. blr
  907. .long 0
  908. .byte 0,12,0x14,0,0,0,3,0
  909. .long 0
  910. .size .bn_mul_comba4,.-.bn_mul_comba4
  911. #
  912. # NOTE: The following label name should be changed to
  913. # "bn_mul_comba8" i.e. remove the first dot
  914. # for the gcc compiler. This should be automatically
  915. # done in the build
  916. #
  917. .align 4
  918. .bn_mul_comba8:
  919. #
  920. # Optimized version of the bn_mul_comba8 routine.
  921. #
  922. # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  923. # r3 contains r
  924. # r4 contains a
  925. # r5 contains b
  926. # r6, r7 are the 2 BN_ULONGs being multiplied.
  927. # r8, r9 are the results of the 32x32 giving 64 multiply.
  928. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  929. #
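  # Same comba column schedule as bn_mul_comba4 above, extended to an
  # 8x8-word product (16 result words).
  #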
  930. xor r0,r0,r0 #r0=0. Used in addze below.
  931. #mul_add_c(a[0],b[0],c1,c2,c3);
  932. $LD r6,`0*$BNSZ`(r4) #a[0]
  933. $LD r7,`0*$BNSZ`(r5) #b[0]
  934. $UMULL r10,r6,r7
  935. $UMULH r11,r6,r7
  936. $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
  937. #mul_add_c(a[0],b[1],c2,c3,c1);
  938. $LD r7,`1*$BNSZ`(r5)
  939. $UMULL r8,r6,r7
  940. $UMULH r9,r6,r7
  941. addc r11,r11,r8
  942. addze r12,r9 # since we didn't set r12 to zero before.
  943. addze r10,r0
  944. #mul_add_c(a[1],b[0],c2,c3,c1);
  945. $LD r6,`1*$BNSZ`(r4)
  946. $LD r7,`0*$BNSZ`(r5)
  947. $UMULL r8,r6,r7
  948. $UMULH r9,r6,r7
  949. addc r11,r11,r8
  950. adde r12,r12,r9
  951. addze r10,r10
  952. $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
  953. #mul_add_c(a[2],b[0],c3,c1,c2);
  954. $LD r6,`2*$BNSZ`(r4)
  955. $UMULL r8,r6,r7
  956. $UMULH r9,r6,r7
  957. addc r12,r12,r8
  958. adde r10,r10,r9
  959. addze r11,r0
  960. #mul_add_c(a[1],b[1],c3,c1,c2);
  961. $LD r6,`1*$BNSZ`(r4)
  962. $LD r7,`1*$BNSZ`(r5)
  963. $UMULL r8,r6,r7
  964. $UMULH r9,r6,r7
  965. addc r12,r12,r8
  966. adde r10,r10,r9
  967. addze r11,r11
  968. #mul_add_c(a[0],b[2],c3,c1,c2);
  969. $LD r6,`0*$BNSZ`(r4)
  970. $LD r7,`2*$BNSZ`(r5)
  971. $UMULL r8,r6,r7
  972. $UMULH r9,r6,r7
  973. addc r12,r12,r8
  974. adde r10,r10,r9
  975. addze r11,r11
  976. $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
  977. #mul_add_c(a[0],b[3],c1,c2,c3);
  978. $LD r7,`3*$BNSZ`(r5)
  979. $UMULL r8,r6,r7
  980. $UMULH r9,r6,r7
  981. addc r10,r10,r8
  982. adde r11,r11,r9
  983. addze r12,r0
  984. #mul_add_c(a[1],b[2],c1,c2,c3);
  985. $LD r6,`1*$BNSZ`(r4)
  986. $LD r7,`2*$BNSZ`(r5)
  987. $UMULL r8,r6,r7
  988. $UMULH r9,r6,r7
  989. addc r10,r10,r8
  990. adde r11,r11,r9
  991. addze r12,r12
  992. #mul_add_c(a[2],b[1],c1,c2,c3);
  993. $LD r6,`2*$BNSZ`(r4)
  994. $LD r7,`1*$BNSZ`(r5)
  995. $UMULL r8,r6,r7
  996. $UMULH r9,r6,r7
  997. addc r10,r10,r8
  998. adde r11,r11,r9
  999. addze r12,r12
  1000. #mul_add_c(a[3],b[0],c1,c2,c3);
  1001. $LD r6,`3*$BNSZ`(r4)
  1002. $LD r7,`0*$BNSZ`(r5)
  1003. $UMULL r8,r6,r7
  1004. $UMULH r9,r6,r7
  1005. addc r10,r10,r8
  1006. adde r11,r11,r9
  1007. addze r12,r12
  1008. $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
  1009. #mul_add_c(a[4],b[0],c2,c3,c1);
  1010. $LD r6,`4*$BNSZ`(r4)
  1011. $UMULL r8,r6,r7
  1012. $UMULH r9,r6,r7
  1013. addc r11,r11,r8
  1014. adde r12,r12,r9
  1015. addze r10,r0
  1016. #mul_add_c(a[3],b[1],c2,c3,c1);
  1017. $LD r6,`3*$BNSZ`(r4)
  1018. $LD r7,`1*$BNSZ`(r5)
  1019. $UMULL r8,r6,r7
  1020. $UMULH r9,r6,r7
  1021. addc r11,r11,r8
  1022. adde r12,r12,r9
  1023. addze r10,r10
  1024. #mul_add_c(a[2],b[2],c2,c3,c1);
  1025. $LD r6,`2*$BNSZ`(r4)
  1026. $LD r7,`2*$BNSZ`(r5)
  1027. $UMULL r8,r6,r7
  1028. $UMULH r9,r6,r7
  1029. addc r11,r11,r8
  1030. adde r12,r12,r9
  1031. addze r10,r10
  1032. #mul_add_c(a[1],b[3],c2,c3,c1);
  1033. $LD r6,`1*$BNSZ`(r4)
  1034. $LD r7,`3*$BNSZ`(r5)
  1035. $UMULL r8,r6,r7
  1036. $UMULH r9,r6,r7
  1037. addc r11,r11,r8
  1038. adde r12,r12,r9
  1039. addze r10,r10
  1040. #mul_add_c(a[0],b[4],c2,c3,c1);
  1041. $LD r6,`0*$BNSZ`(r4)
  1042. $LD r7,`4*$BNSZ`(r5)
  1043. $UMULL r8,r6,r7
  1044. $UMULH r9,r6,r7
  1045. addc r11,r11,r8
  1046. adde r12,r12,r9
  1047. addze r10,r10
  1048. $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
  1049. #mul_add_c(a[0],b[5],c3,c1,c2);
  1050. $LD r7,`5*$BNSZ`(r5)
  1051. $UMULL r8,r6,r7
  1052. $UMULH r9,r6,r7
  1053. addc r12,r12,r8
  1054. adde r10,r10,r9
  1055. addze r11,r0
  1056. #mul_add_c(a[1],b[4],c3,c1,c2);
  1057. $LD r6,`1*$BNSZ`(r4)
  1058. $LD r7,`4*$BNSZ`(r5)
  1059. $UMULL r8,r6,r7
  1060. $UMULH r9,r6,r7
  1061. addc r12,r12,r8
  1062. adde r10,r10,r9
  1063. addze r11,r11
  1064. #mul_add_c(a[2],b[3],c3,c1,c2);
  1065. $LD r6,`2*$BNSZ`(r4)
  1066. $LD r7,`3*$BNSZ`(r5)
  1067. $UMULL r8,r6,r7
  1068. $UMULH r9,r6,r7
  1069. addc r12,r12,r8
  1070. adde r10,r10,r9
  1071. addze r11,r11
  1072. #mul_add_c(a[3],b[2],c3,c1,c2);
  1073. $LD r6,`3*$BNSZ`(r4)
  1074. $LD r7,`2*$BNSZ`(r5)
  1075. $UMULL r8,r6,r7
  1076. $UMULH r9,r6,r7
  1077. addc r12,r12,r8
  1078. adde r10,r10,r9
  1079. addze r11,r11
  1080. #mul_add_c(a[4],b[1],c3,c1,c2);
  1081. $LD r6,`4*$BNSZ`(r4)
  1082. $LD r7,`1*$BNSZ`(r5)
  1083. $UMULL r8,r6,r7
  1084. $UMULH r9,r6,r7
  1085. addc r12,r12,r8
  1086. adde r10,r10,r9
  1087. addze r11,r11
  1088. #mul_add_c(a[5],b[0],c3,c1,c2);
  1089. $LD r6,`5*$BNSZ`(r4)
  1090. $LD r7,`0*$BNSZ`(r5)
  1091. $UMULL r8,r6,r7
  1092. $UMULH r9,r6,r7
  1093. addc r12,r12,r8
  1094. adde r10,r10,r9
  1095. addze r11,r11
  1096. $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
  1097. #mul_add_c(a[6],b[0],c1,c2,c3);
  1098. $LD r6,`6*$BNSZ`(r4)
  1099. $UMULL r8,r6,r7
  1100. $UMULH r9,r6,r7
  1101. addc r10,r10,r8
  1102. adde r11,r11,r9
  1103. addze r12,r0
  1104. #mul_add_c(a[5],b[1],c1,c2,c3);
  1105. $LD r6,`5*$BNSZ`(r4)
  1106. $LD r7,`1*$BNSZ`(r5)
  1107. $UMULL r8,r6,r7
  1108. $UMULH r9,r6,r7
  1109. addc r10,r10,r8
  1110. adde r11,r11,r9
  1111. addze r12,r12
  1112. #mul_add_c(a[4],b[2],c1,c2,c3);
  1113. $LD r6,`4*$BNSZ`(r4)
  1114. $LD r7,`2*$BNSZ`(r5)
  1115. $UMULL r8,r6,r7
  1116. $UMULH r9,r6,r7
  1117. addc r10,r10,r8
  1118. adde r11,r11,r9
  1119. addze r12,r12
  1120. #mul_add_c(a[3],b[3],c1,c2,c3);
  1121. $LD r6,`3*$BNSZ`(r4)
  1122. $LD r7,`3*$BNSZ`(r5)
  1123. $UMULL r8,r6,r7
  1124. $UMULH r9,r6,r7
  1125. addc r10,r10,r8
  1126. adde r11,r11,r9
  1127. addze r12,r12
  1128. #mul_add_c(a[2],b[4],c1,c2,c3);
  1129. $LD r6,`2*$BNSZ`(r4)
  1130. $LD r7,`4*$BNSZ`(r5)
  1131. $UMULL r8,r6,r7
  1132. $UMULH r9,r6,r7
  1133. addc r10,r10,r8
  1134. adde r11,r11,r9
  1135. addze r12,r12
  1136. #mul_add_c(a[1],b[5],c1,c2,c3);
  1137. $LD r6,`1*$BNSZ`(r4)
  1138. $LD r7,`5*$BNSZ`(r5)
  1139. $UMULL r8,r6,r7
  1140. $UMULH r9,r6,r7
  1141. addc r10,r10,r8
  1142. adde r11,r11,r9
  1143. addze r12,r12
  1144. #mul_add_c(a[0],b[6],c1,c2,c3);
  1145. $LD r6,`0*$BNSZ`(r4)
  1146. $LD r7,`6*$BNSZ`(r5)
  1147. $UMULL r8,r6,r7
  1148. $UMULH r9,r6,r7
  1149. addc r10,r10,r8
  1150. adde r11,r11,r9
  1151. addze r12,r12
  1152. $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
  1153. #mul_add_c(a[0],b[7],c2,c3,c1);
  1154. $LD r7,`7*$BNSZ`(r5)
  1155. $UMULL r8,r6,r7
  1156. $UMULH r9,r6,r7
  1157. addc r11,r11,r8
  1158. adde r12,r12,r9
  1159. addze r10,r0
  1160. #mul_add_c(a[1],b[6],c2,c3,c1);
  1161. $LD r6,`1*$BNSZ`(r4)
  1162. $LD r7,`6*$BNSZ`(r5)
  1163. $UMULL r8,r6,r7
  1164. $UMULH r9,r6,r7
  1165. addc r11,r11,r8
  1166. adde r12,r12,r9
  1167. addze r10,r10
  1168. #mul_add_c(a[2],b[5],c2,c3,c1);
  1169. $LD r6,`2*$BNSZ`(r4)
  1170. $LD r7,`5*$BNSZ`(r5)
  1171. $UMULL r8,r6,r7
  1172. $UMULH r9,r6,r7
  1173. addc r11,r11,r8
  1174. adde r12,r12,r9
  1175. addze r10,r10
  1176. #mul_add_c(a[3],b[4],c2,c3,c1);
  1177. $LD r6,`3*$BNSZ`(r4)
  1178. $LD r7,`4*$BNSZ`(r5)
  1179. $UMULL r8,r6,r7
  1180. $UMULH r9,r6,r7
  1181. addc r11,r11,r8
  1182. adde r12,r12,r9
  1183. addze r10,r10
  1184. #mul_add_c(a[4],b[3],c2,c3,c1);
  1185. $LD r6,`4*$BNSZ`(r4)
  1186. $LD r7,`3*$BNSZ`(r5)
  1187. $UMULL r8,r6,r7
  1188. $UMULH r9,r6,r7
  1189. addc r11,r11,r8
  1190. adde r12,r12,r9
  1191. addze r10,r10
  1192. #mul_add_c(a[5],b[2],c2,c3,c1);
  1193. $LD r6,`5*$BNSZ`(r4)
  1194. $LD r7,`2*$BNSZ`(r5)
  1195. $UMULL r8,r6,r7
  1196. $UMULH r9,r6,r7
  1197. addc r11,r11,r8
  1198. adde r12,r12,r9
  1199. addze r10,r10
  1200. #mul_add_c(a[6],b[1],c2,c3,c1);
  1201. $LD r6,`6*$BNSZ`(r4)
  1202. $LD r7,`1*$BNSZ`(r5)
  1203. $UMULL r8,r6,r7
  1204. $UMULH r9,r6,r7
  1205. addc r11,r11,r8
  1206. adde r12,r12,r9
  1207. addze r10,r10
  1208. #mul_add_c(a[7],b[0],c2,c3,c1);
  1209. $LD r6,`7*$BNSZ`(r4)
  1210. $LD r7,`0*$BNSZ`(r5)
  1211. $UMULL r8,r6,r7
  1212. $UMULH r9,r6,r7
  1213. addc r11,r11,r8
  1214. adde r12,r12,r9
  1215. addze r10,r10
  1216. $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
  1217. #mul_add_c(a[7],b[1],c3,c1,c2);
  1218. $LD r7,`1*$BNSZ`(r5)
  1219. $UMULL r8,r6,r7
  1220. $UMULH r9,r6,r7
  1221. addc r12,r12,r8
  1222. adde r10,r10,r9
  1223. addze r11,r0
  1224. #mul_add_c(a[6],b[2],c3,c1,c2);
  1225. $LD r6,`6*$BNSZ`(r4)
  1226. $LD r7,`2*$BNSZ`(r5)
  1227. $UMULL r8,r6,r7
  1228. $UMULH r9,r6,r7
  1229. addc r12,r12,r8
  1230. adde r10,r10,r9
  1231. addze r11,r11
  1232. #mul_add_c(a[5],b[3],c3,c1,c2);
  1233. $LD r6,`5*$BNSZ`(r4)
  1234. $LD r7,`3*$BNSZ`(r5)
  1235. $UMULL r8,r6,r7
  1236. $UMULH r9,r6,r7
  1237. addc r12,r12,r8
  1238. adde r10,r10,r9
  1239. addze r11,r11
  1240. #mul_add_c(a[4],b[4],c3,c1,c2);
  1241. $LD r6,`4*$BNSZ`(r4)
  1242. $LD r7,`4*$BNSZ`(r5)
  1243. $UMULL r8,r6,r7
  1244. $UMULH r9,r6,r7
  1245. addc r12,r12,r8
  1246. adde r10,r10,r9
  1247. addze r11,r11
  1248. #mul_add_c(a[3],b[5],c3,c1,c2);
  1249. $LD r6,`3*$BNSZ`(r4)
  1250. $LD r7,`5*$BNSZ`(r5)
  1251. $UMULL r8,r6,r7
  1252. $UMULH r9,r6,r7
  1253. addc r12,r12,r8
  1254. adde r10,r10,r9
  1255. addze r11,r11
  1256. #mul_add_c(a[2],b[6],c3,c1,c2);
  1257. $LD r6,`2*$BNSZ`(r4)
  1258. $LD r7,`6*$BNSZ`(r5)
  1259. $UMULL r8,r6,r7
  1260. $UMULH r9,r6,r7
  1261. addc r12,r12,r8
  1262. adde r10,r10,r9
  1263. addze r11,r11
  1264. #mul_add_c(a[1],b[7],c3,c1,c2);
  1265. $LD r6,`1*$BNSZ`(r4)
  1266. $LD r7,`7*$BNSZ`(r5)
  1267. $UMULL r8,r6,r7
  1268. $UMULH r9,r6,r7
  1269. addc r12,r12,r8
  1270. adde r10,r10,r9
  1271. addze r11,r11
  1272. $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
  1273. #mul_add_c(a[2],b[7],c1,c2,c3);
  1274. $LD r6,`2*$BNSZ`(r4)
  1275. $UMULL r8,r6,r7
  1276. $UMULH r9,r6,r7
  1277. addc r10,r10,r8
  1278. adde r11,r11,r9
  1279. addze r12,r0
  1280. #mul_add_c(a[3],b[6],c1,c2,c3);
  1281. $LD r6,`3*$BNSZ`(r4)
  1282. $LD r7,`6*$BNSZ`(r5)
  1283. $UMULL r8,r6,r7
  1284. $UMULH r9,r6,r7
  1285. addc r10,r10,r8
  1286. adde r11,r11,r9
  1287. addze r12,r12
  1288. #mul_add_c(a[4],b[5],c1,c2,c3);
  1289. $LD r6,`4*$BNSZ`(r4)
  1290. $LD r7,`5*$BNSZ`(r5)
  1291. $UMULL r8,r6,r7
  1292. $UMULH r9,r6,r7
  1293. addc r10,r10,r8
  1294. adde r11,r11,r9
  1295. addze r12,r12
  1296. #mul_add_c(a[5],b[4],c1,c2,c3);
  1297. $LD r6,`5*$BNSZ`(r4)
  1298. $LD r7,`4*$BNSZ`(r5)
  1299. $UMULL r8,r6,r7
  1300. $UMULH r9,r6,r7
  1301. addc r10,r10,r8
  1302. adde r11,r11,r9
  1303. addze r12,r12
  1304. #mul_add_c(a[6],b[3],c1,c2,c3);
  1305. $LD r6,`6*$BNSZ`(r4)
  1306. $LD r7,`3*$BNSZ`(r5)
  1307. $UMULL r8,r6,r7
  1308. $UMULH r9,r6,r7
  1309. addc r10,r10,r8
  1310. adde r11,r11,r9
  1311. addze r12,r12
  1312. #mul_add_c(a[7],b[2],c1,c2,c3);
  1313. $LD r6,`7*$BNSZ`(r4)
  1314. $LD r7,`2*$BNSZ`(r5)
  1315. $UMULL r8,r6,r7
  1316. $UMULH r9,r6,r7
  1317. addc r10,r10,r8
  1318. adde r11,r11,r9
  1319. addze r12,r12
  1320. $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
  1321. #mul_add_c(a[7],b[3],c2,c3,c1);
  1322. $LD r7,`3*$BNSZ`(r5)
  1323. $UMULL r8,r6,r7
  1324. $UMULH r9,r6,r7
  1325. addc r11,r11,r8
  1326. adde r12,r12,r9
  1327. addze r10,r0
  1328. #mul_add_c(a[6],b[4],c2,c3,c1);
  1329. $LD r6,`6*$BNSZ`(r4)
  1330. $LD r7,`4*$BNSZ`(r5)
  1331. $UMULL r8,r6,r7
  1332. $UMULH r9,r6,r7
  1333. addc r11,r11,r8
  1334. adde r12,r12,r9
  1335. addze r10,r10
  1336. #mul_add_c(a[5],b[5],c2,c3,c1);
  1337. $LD r6,`5*$BNSZ`(r4)
  1338. $LD r7,`5*$BNSZ`(r5)
  1339. $UMULL r8,r6,r7
  1340. $UMULH r9,r6,r7
  1341. addc r11,r11,r8
  1342. adde r12,r12,r9
  1343. addze r10,r10
  1344. #mul_add_c(a[4],b[6],c2,c3,c1);
  1345. $LD r6,`4*$BNSZ`(r4)
  1346. $LD r7,`6*$BNSZ`(r5)
  1347. $UMULL r8,r6,r7
  1348. $UMULH r9,r6,r7
  1349. addc r11,r11,r8
  1350. adde r12,r12,r9
  1351. addze r10,r10
  1352. #mul_add_c(a[3],b[7],c2,c3,c1);
  1353. $LD r6,`3*$BNSZ`(r4)
  1354. $LD r7,`7*$BNSZ`(r5)
  1355. $UMULL r8,r6,r7
  1356. $UMULH r9,r6,r7
  1357. addc r11,r11,r8
  1358. adde r12,r12,r9
  1359. addze r10,r10
  1360. $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
  1361. #mul_add_c(a[4],b[7],c3,c1,c2);
  1362. $LD r6,`4*$BNSZ`(r4)
  1363. $UMULL r8,r6,r7
  1364. $UMULH r9,r6,r7
  1365. addc r12,r12,r8
  1366. adde r10,r10,r9
  1367. addze r11,r0
  1368. #mul_add_c(a[5],b[6],c3,c1,c2);
  1369. $LD r6,`5*$BNSZ`(r4)
  1370. $LD r7,`6*$BNSZ`(r5)
  1371. $UMULL r8,r6,r7
  1372. $UMULH r9,r6,r7
  1373. addc r12,r12,r8
  1374. adde r10,r10,r9
  1375. addze r11,r11
  1376. #mul_add_c(a[6],b[5],c3,c1,c2);
  1377. $LD r6,`6*$BNSZ`(r4)
  1378. $LD r7,`5*$BNSZ`(r5)
  1379. $UMULL r8,r6,r7
  1380. $UMULH r9,r6,r7
  1381. addc r12,r12,r8
  1382. adde r10,r10,r9
  1383. addze r11,r11
  1384. #mul_add_c(a[7],b[4],c3,c1,c2);
  1385. $LD r6,`7*$BNSZ`(r4)
  1386. $LD r7,`4*$BNSZ`(r5)
  1387. $UMULL r8,r6,r7
  1388. $UMULH r9,r6,r7
  1389. addc r12,r12,r8
  1390. adde r10,r10,r9
  1391. addze r11,r11
  1392. $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
  1393. #mul_add_c(a[7],b[5],c1,c2,c3);
  1394. $LD r7,`5*$BNSZ`(r5)
  1395. $UMULL r8,r6,r7
  1396. $UMULH r9,r6,r7
  1397. addc r10,r10,r8
  1398. adde r11,r11,r9
  1399. addze r12,r0
  1400. #mul_add_c(a[6],b[6],c1,c2,c3);
  1401. $LD r6,`6*$BNSZ`(r4)
  1402. $LD r7,`6*$BNSZ`(r5)
  1403. $UMULL r8,r6,r7
  1404. $UMULH r9,r6,r7
  1405. addc r10,r10,r8
  1406. adde r11,r11,r9
  1407. addze r12,r12
  1408. #mul_add_c(a[5],b[7],c1,c2,c3);
  1409. $LD r6,`5*$BNSZ`(r4)
  1410. $LD r7,`7*$BNSZ`(r5)
  1411. $UMULL r8,r6,r7
  1412. $UMULH r9,r6,r7
  1413. addc r10,r10,r8
  1414. adde r11,r11,r9
  1415. addze r12,r12
  1416. $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
  1417. #mul_add_c(a[6],b[7],c2,c3,c1);
  1418. $LD r6,`6*$BNSZ`(r4)
  1419. $UMULL r8,r6,r7
  1420. $UMULH r9,r6,r7
  1421. addc r11,r11,r8
  1422. adde r12,r12,r9
  1423. addze r10,r0
  1424. #mul_add_c(a[7],b[6],c2,c3,c1);
  1425. $LD r6,`7*$BNSZ`(r4)
  1426. $LD r7,`6*$BNSZ`(r5)
  1427. $UMULL r8,r6,r7
  1428. $UMULH r9,r6,r7
  1429. addc r11,r11,r8
  1430. adde r12,r12,r9
  1431. addze r10,r10
  1432. $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
  1433. #mul_add_c(a[7],b[7],c3,c1,c2);
  1434. $LD r7,`7*$BNSZ`(r5)
  1435. $UMULL r8,r6,r7
  1436. $UMULH r9,r6,r7
  1437. addc r12,r12,r8
  1438. adde r10,r10,r9
  1439. $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
  1440. $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
  1441. blr
  1442. .long 0
  1443. .byte 0,12,0x14,0,0,0,3,0
  1444. .long 0
  1445. .size .bn_mul_comba8,.-.bn_mul_comba8
  1446. #
  1447. # NOTE: The following label name should be changed to
  1448. # "bn_sub_words" i.e. remove the first dot
  1449. # for the gcc compiler. This should be automatically
  1450. # done in the build
  1451. #
  1452. #
  1453. .align 4
  1454. .bn_sub_words:
  1455. #
  1456. # Handcoded version of bn_sub_words
  1457. #
  1458. #BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1459. #
  1460. # r3 = r
  1461. # r4 = a
  1462. # r5 = b
  1463. # r6 = n
  1464. #
  1465. # Note: No loop unrolling done since this is not a performance
  1466. # critical loop.
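  # Computes r[i] = a[i] - b[i] - borrow for i = 0..n-1, carrying the borrow
  # through the loop in the carry bit, and returns the final borrow
  # (1 if the n-word subtraction underflowed, 0 otherwise).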
  1467. xor r0,r0,r0 #set r0 = 0
  1468. #
  1469. # check for r6 = 0 AND set carry bit.
  1470. #
  1471. subfc. r7,r0,r6 # If r6 is 0 then result is 0.
  1472. # if r6 > 0 then result !=0
  1473. # In either case carry bit is set.
  1474. beq Lppcasm_sub_adios
  1475. addi r4,r4,-$BNSZ
  1476. addi r3,r3,-$BNSZ
  1477. addi r5,r5,-$BNSZ
  1478. mtctr r6
  1479. Lppcasm_sub_mainloop:
  1480. $LDU r7,$BNSZ(r4)
  1481. $LDU r8,$BNSZ(r5)
  1482. subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
  1483. # if carry = 1 this is r7-r8. Else it
  1484. # is r7-r8 -1 as we need.
  1485. $STU r6,$BNSZ(r3)
  1486. bdnz- Lppcasm_sub_mainloop
  1487. Lppcasm_sub_adios:
  1488. subfze r3,r0 # if carry bit is set then r3 = 0 else -1
  1489. andi. r3,r3,1 # keep only last bit.
  1490. blr
  1491. .long 0
  1492. .byte 0,12,0x14,0,0,0,4,0
  1493. .long 0
  1494. .size .bn_sub_words,.-.bn_sub_words
  1495. #
  1496. # NOTE: The following label name should be changed to
  1497. # "bn_add_words" i.e. remove the first dot
  1498. # for the gcc compiler. This should be automatically
  1499. # done in the build
  1500. #
  1501. .align 4
  1502. .bn_add_words:
  1503. #
  1504. # Handcoded version of bn_add_words
  1505. #
  1506. #BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1507. #
  1508. # r3 = r
  1509. # r4 = a
  1510. # r5 = b
  1511. # r6 = n
  1512. #
  1513. # Note: No loop unrolling done since this is not a performance
  1514. # critical loop.
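  # Computes r[i] = a[i] + b[i] + carry for i = 0..n-1, carrying through the
  # loop in the carry bit, and returns the final carry (0 or 1).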
  1515. xor r0,r0,r0
  1516. #
  1517. # check for r6 = 0. Is this needed?
  1518. #
  1519. addic. r6,r6,0 #test r6 and clear carry bit.
  1520. beq Lppcasm_add_adios
  1521. addi r4,r4,-$BNSZ
  1522. addi r3,r3,-$BNSZ
  1523. addi r5,r5,-$BNSZ
  1524. mtctr r6
  1525. Lppcasm_add_mainloop:
  1526. $LDU r7,$BNSZ(r4)
  1527. $LDU r8,$BNSZ(r5)
  1528. adde r8,r7,r8
  1529. $STU r8,$BNSZ(r3)
  1530. bdnz- Lppcasm_add_mainloop
  1531. Lppcasm_add_adios:
  1532. addze r3,r0 #return carry bit.
  1533. blr
  1534. .long 0
  1535. .byte 0,12,0x14,0,0,0,4,0
  1536. .long 0
  1537. .size .bn_add_words,.-.bn_add_words
  1538. #
  1539. # NOTE: The following label name should be changed to
  1540. # "bn_div_words" i.e. remove the first dot
  1541. # for the gcc compiler. This should be automatically
  1542. # done in the build
  1543. #
  1544. .align 4
  1545. .bn_div_words:
  1546. #
  1547. # This is a cleaned-up version of code generated by
  1548. # the AIX compiler. The only optimization is to use
  1549. # the PPC count-leading-zeros instruction instead of
  1550. # a call to num_bits_word. Since this was compiled
  1551. # only at level -O2, it could probably be squeezed further.
  1552. #
  1553. # r3 = h
  1554. # r4 = l
  1555. # r5 = d
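  #
  # Returns the $BITS-bit quotient of the two-word value (h,l) divided by d.
  # The quotient is built half a word at a time: the outer loop below runs
  # exactly twice, once for the high half and once for the low half,
  # mirroring the generic C bn_div_words.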
  1556. $UCMPI 0,r5,0 # compare r5 and 0
  1557. bne Lppcasm_div1 # proceed if d!=0
  1558. li r3,-1 # d=0 return -1
  1559. blr
  1560. Lppcasm_div1:
  1561. xor r0,r0,r0 #r0=0
  1562. li r8,$BITS
  1563. $CNTLZ. r7,r5 #r7 = num leading 0s in d.
  1564. beq Lppcasm_div2 #proceed if no leading zeros
  1565. subf r8,r7,r8 #r8 = BN_num_bits_word(d)
  1566. $SHR. r9,r3,r8 #are there any bits above r8'th?
  1567. $TR 16,r9,r0 #if there are, signal to dump core...
  1568. Lppcasm_div2:
  1569. $UCMP 0,r3,r5 #h>=d?
  1570. blt Lppcasm_div3 #goto Lppcasm_div3 if not
  1571. subf r3,r5,r3 #h-=d ;
  1572. Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
  1573. cmpi 0,0,r7,0 # is (i == 0)?
  1574. beq Lppcasm_div4
  1575. $SHL r3,r3,r7 # h = (h<< i)
  1576. $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
  1577. $SHL r5,r5,r7 # d<<=i
  1578. or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
  1579. $SHL r4,r4,r7 # l <<=i
  1580. Lppcasm_div4:
  1581. $SHRI r9,r5,`$BITS/2` # r9 = dh
  1582. # dl will be computed when needed
  1583. # as it saves registers.
  1584. li r6,2 #r6=2
  1585. mtctr r6 #counter will be in count.
  1586. Lppcasm_divouterloop:
  1587. $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
  1588. $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
  1589. # compute here for innerloop.
  1590. $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
  1591. bne Lppcasm_div5 # goto Lppcasm_div5 if not
  1592. li r8,-1
  1593. $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
  1594. b Lppcasm_div6
  1595. Lppcasm_div5:
  1596. $UDIV r8,r3,r9 #q = h/dh
  1597. Lppcasm_div6:
  1598. $UMULL r12,r9,r8 #th = q*dh
  1599. $CLRU r10,r5,`$BITS/2` #r10=dl
  1600. $UMULL r6,r8,r10 #tl = q*dl
  1601. Lppcasm_divinnerloop:
  1602. subf r10,r12,r3 #t = h -th
  1603. $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
  1604. addic. r7,r7,0 #test if r7 == 0. used below.
  1605. # now want to compute
  1606. # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
  1607. # the following 2 instructions do that
  1608. $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
  1609. or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
  1610. $UCMP cr1,r6,r7 # compare (tl <= r7)
  1611. bne Lppcasm_divinnerexit
  1612. ble cr1,Lppcasm_divinnerexit
  1613. addi r8,r8,-1 #q--
  1614. subf r12,r9,r12 #th -=dh
  1615. $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
  1616. subf r6,r10,r6 #tl -=dl
  1617. b Lppcasm_divinnerloop
  1618. Lppcasm_divinnerexit:
  1619. $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
  1620. $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
  1621. $UCMP cr1,r4,r11 # compare l and tl
  1622. add r12,r12,r10 # th+=t
  1623. bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
  1624. addi r12,r12,1 # th++
  1625. Lppcasm_div7:
  1626. subf r11,r11,r4 #r11=l-tl
  1627. $UCMP cr1,r3,r12 #compare h and th
  1628. bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
  1629. addi r8,r8,-1 # q--
  1630. add r3,r5,r3 # h+=d
  1631. Lppcasm_div8:
  1632. subf r12,r12,r3 #r12 = h-th
  1633. $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
  1634. # want to compute
  1635. # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
  1636. # the following 2 instructions will do this.
  1637. $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
  1638. $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
  1639. bdz Lppcasm_div9 #if (count==0) break ;
  1640. $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
  1641. b Lppcasm_divouterloop
  1642. Lppcasm_div9:
  1643. or r3,r8,r0
  1644. blr
  1645. .long 0
  1646. .byte 0,12,0x14,0,0,0,3,0
  1647. .long 0
  1648. .size .bn_div_words,.-.bn_div_words
  1649. #
  1650. # NOTE: The following label name should be changed to
  1651. # "bn_sqr_words" i.e. remove the first dot
  1652. # for the gcc compiler. This should be automatically
  1653. # done in the build
  1654. #
  1655. .align 4
  1656. .bn_sqr_words:
  1657. #
  1658. # Optimized version of bn_sqr_words
  1659. #
  1660. # void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
  1661. #
  1662. # r3 = r
  1663. # r4 = a
  1664. # r5 = n
  1665. #
  1666. # r6 = a[i].
  1667. # r7,r8 = product.
  1668. #
  1669. # No unrolling done here. Not performance critical.
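  # For each i, stores the double-word square of a[i]:
  # r[2*i] gets the low word and r[2*i+1] the high word of a[i]*a[i].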
  1670. addic. r5,r5,0 #test r5.
  1671. beq Lppcasm_sqr_adios
  1672. addi r4,r4,-$BNSZ
  1673. addi r3,r3,-$BNSZ
  1674. mtctr r5
  1675. Lppcasm_sqr_mainloop:
  1676. #sqr(r[0],r[1],a[0]);
  1677. $LDU r6,$BNSZ(r4)
  1678. $UMULL r7,r6,r6
  1679. $UMULH r8,r6,r6
  1680. $STU r7,$BNSZ(r3)
  1681. $STU r8,$BNSZ(r3)
  1682. bdnz- Lppcasm_sqr_mainloop
  1683. Lppcasm_sqr_adios:
  1684. blr
  1685. .long 0
  1686. .byte 0,12,0x14,0,0,0,3,0
  1687. .long 0
  1688. .size .bn_sqr_words,.-.bn_sqr_words
  1689. #
  1690. # NOTE: The following label name should be changed to
  1691. # "bn_mul_words" i.e. remove the first dot
  1692. # for the gcc compiler. This should be automatically
  1693. # done in the build
  1694. #
  1695. .align 4
  1696. .bn_mul_words:
  1697. #
  1698. # BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
  1699. #
  1700. # r3 = rp
  1701. # r4 = ap
  1702. # r5 = num
  1703. # r6 = w
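  #
  # Computes rp[i] = low word of (ap[i]*w + carry) for i = 0..num-1 and
  # returns the final carry. The main loop below is unrolled four times,
  # with the high word of each product carried into the next multiply
  # through r10/r12.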
  1704. xor r0,r0,r0
  1705. xor r12,r12,r12 # used for carry
  1706. rlwinm. r7,r5,30,2,31 # num >> 2
  1707. beq Lppcasm_mw_REM
  1708. mtctr r7
  1709. Lppcasm_mw_LOOP:
  1710. #mul(rp[0],ap[0],w,c1);
  1711. $LD r8,`0*$BNSZ`(r4)
  1712. $UMULL r9,r6,r8
  1713. $UMULH r10,r6,r8
  1714. addc r9,r9,r12
  1715. #addze r10,r10 #carry is NOT ignored.
  1716. #will be taken care of
  1717. #in second spin below
  1718. #using adde.
  1719. $ST r9,`0*$BNSZ`(r3)
  1720. #mul(rp[1],ap[1],w,c1);
  1721. $LD r8,`1*$BNSZ`(r4)
  1722. $UMULL r11,r6,r8
  1723. $UMULH r12,r6,r8
  1724. adde r11,r11,r10
  1725. #addze r12,r12
  1726. $ST r11,`1*$BNSZ`(r3)
  1727. #mul(rp[2],ap[2],w,c1);
  1728. $LD r8,`2*$BNSZ`(r4)
  1729. $UMULL r9,r6,r8
  1730. $UMULH r10,r6,r8
  1731. adde r9,r9,r12
  1732. #addze r10,r10
  1733. $ST r9,`2*$BNSZ`(r3)
  1734. #mul_add(rp[3],ap[3],w,c1);
  1735. $LD r8,`3*$BNSZ`(r4)
  1736. $UMULL r11,r6,r8
  1737. $UMULH r12,r6,r8
  1738. adde r11,r11,r10
  1739. addze r12,r12 #this spin we collect carry into
  1740. #r12
  1741. $ST r11,`3*$BNSZ`(r3)
  1742. addi r3,r3,`4*$BNSZ`
  1743. addi r4,r4,`4*$BNSZ`
  1744. bdnz- Lppcasm_mw_LOOP
  1745. Lppcasm_mw_REM:
  1746. andi. r5,r5,0x3
  1747. beq Lppcasm_mw_OVER
  1748. #mul(rp[0],ap[0],w,c1);
  1749. $LD r8,`0*$BNSZ`(r4)
  1750. $UMULL r9,r6,r8
  1751. $UMULH r10,r6,r8
  1752. addc r9,r9,r12
  1753. addze r10,r10
  1754. $ST r9,`0*$BNSZ`(r3)
  1755. addi r12,r10,0
  1756. addi r5,r5,-1
  1757. cmpli 0,0,r5,0
  1758. beq Lppcasm_mw_OVER
  1759. #mul(rp[1],ap[1],w,c1);
  1760. $LD r8,`1*$BNSZ`(r4)
  1761. $UMULL r9,r6,r8
  1762. $UMULH r10,r6,r8
  1763. addc r9,r9,r12
  1764. addze r10,r10
  1765. $ST r9,`1*$BNSZ`(r3)
  1766. addi r12,r10,0
  1767. addi r5,r5,-1
  1768. cmpli 0,0,r5,0
  1769. beq Lppcasm_mw_OVER
  1770. #mul_add(rp[2],ap[2],w,c1);
  1771. $LD r8,`2*$BNSZ`(r4)
  1772. $UMULL r9,r6,r8
  1773. $UMULH r10,r6,r8
  1774. addc r9,r9,r12
  1775. addze r10,r10
  1776. $ST r9,`2*$BNSZ`(r3)
  1777. addi r12,r10,0
  1778. Lppcasm_mw_OVER:
  1779. addi r3,r12,0
  1780. blr
  1781. .long 0
  1782. .byte 0,12,0x14,0,0,0,4,0
  1783. .long 0
  1784. .size .bn_mul_words,.-.bn_mul_words
  1785. #
  1786. # NOTE: The following label name should be changed to
  1787. # "bn_mul_add_words" i.e. remove the first dot
  1788. # for the gcc compiler. This should be automatically
  1789. # done in the build
  1790. #
  1791. .align 4
  1792. .bn_mul_add_words:
  1793. #
  1794. # BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
  1795. #
  1796. # r3 = rp
  1797. # r4 = ap
  1798. # r5 = num
  1799. # r6 = w
  1800. #
  1801. # Empirical evidence suggests that the unrolled version performs best.
  1802. #
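  # Computes rp[i] = low word of (rp[i] + ap[i]*w + carry) for i = 0..num-1
  # and returns the final carry.
  #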
  1803. xor r0,r0,r0 #r0 = 0
  1804. xor r12,r12,r12 #r12 = 0 . used for carry
  1805. rlwinm. r7,r5,30,2,31 # num >> 2
  1806. beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover
  1807. mtctr r7
  1808. Lppcasm_maw_mainloop:
  1809. #mul_add(rp[0],ap[0],w,c1);
  1810. $LD r8,`0*$BNSZ`(r4)
  1811. $LD r11,`0*$BNSZ`(r3)
  1812. $UMULL r9,r6,r8
  1813. $UMULH r10,r6,r8
  1814. addc r9,r9,r12 #r12 is carry.
  1815. addze r10,r10
  1816. addc r9,r9,r11
  1817. #addze r10,r10
  1818. #the above instruction addze
  1819. #is NOT needed. Carry will NOT
  1820. #be ignored. It's not affected
  1821. #by multiply and will be collected
  1822. #in the next spin
  1823. $ST r9,`0*$BNSZ`(r3)
  1824. #mul_add(rp[1],ap[1],w,c1);
  1825. $LD r8,`1*$BNSZ`(r4)
  1826. $LD r9,`1*$BNSZ`(r3)
  1827. $UMULL r11,r6,r8
  1828. $UMULH r12,r6,r8
  1829. adde r11,r11,r10 #r10 is carry.
  1830. addze r12,r12
  1831. addc r11,r11,r9
  1832. #addze r12,r12
  1833. $ST r11,`1*$BNSZ`(r3)
  1834. #mul_add(rp[2],ap[2],w,c1);
  1835. $LD r8,`2*$BNSZ`(r4)
  1836. $UMULL r9,r6,r8
  1837. $LD r11,`2*$BNSZ`(r3)
  1838. $UMULH r10,r6,r8
  1839. adde r9,r9,r12
  1840. addze r10,r10
  1841. addc r9,r9,r11
  1842. #addze r10,r10
  1843. $ST r9,`2*$BNSZ`(r3)
  1844. #mul_add(rp[3],ap[3],w,c1);
  1845. $LD r8,`3*$BNSZ`(r4)
  1846. $UMULL r11,r6,r8
  1847. $LD r9,`3*$BNSZ`(r3)
  1848. $UMULH r12,r6,r8
  1849. adde r11,r11,r10
  1850. addze r12,r12
  1851. addc r11,r11,r9
  1852. addze r12,r12
  1853. $ST r11,`3*$BNSZ`(r3)
  1854. addi r3,r3,`4*$BNSZ`
  1855. addi r4,r4,`4*$BNSZ`
  1856. bdnz- Lppcasm_maw_mainloop
  1857. Lppcasm_maw_leftover:
  1858. andi. r5,r5,0x3
  1859. beq Lppcasm_maw_adios
  1860. addi r3,r3,-$BNSZ
  1861. addi r4,r4,-$BNSZ
  1862. #mul_add(rp[0],ap[0],w,c1);
  1863. mtctr r5
  1864. $LDU r8,$BNSZ(r4)
  1865. $UMULL r9,r6,r8
  1866. $UMULH r10,r6,r8
  1867. $LDU r11,$BNSZ(r3)
  1868. addc r9,r9,r11
  1869. addze r10,r10
  1870. addc r9,r9,r12
  1871. addze r12,r10
  1872. $ST r9,0(r3)
  1873. bdz Lppcasm_maw_adios
  1874. #mul_add(rp[1],ap[1],w,c1);
  1875. $LDU r8,$BNSZ(r4)
  1876. $UMULL r9,r6,r8
  1877. $UMULH r10,r6,r8
  1878. $LDU r11,$BNSZ(r3)
  1879. addc r9,r9,r11
  1880. addze r10,r10
  1881. addc r9,r9,r12
  1882. addze r12,r10
  1883. $ST r9,0(r3)
  1884. bdz Lppcasm_maw_adios
  1885. #mul_add(rp[2],ap[2],w,c1);
  1886. $LDU r8,$BNSZ(r4)
  1887. $UMULL r9,r6,r8
  1888. $UMULH r10,r6,r8
  1889. $LDU r11,$BNSZ(r3)
  1890. addc r9,r9,r11
  1891. addze r10,r10
  1892. addc r9,r9,r12
  1893. addze r12,r10
  1894. $ST r9,0(r3)
  1895. Lppcasm_maw_adios:
  1896. addi r3,r12,0
  1897. blr
  1898. .long 0
  1899. .byte 0,12,0x14,0,0,0,4,0
  1900. .long 0
  1901. .size .bn_mul_add_words,.-.bn_mul_add_words
  1902. .align 4
  1903. EOF
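  # Evaluate the backquote-delimited expressions embedded in the template
  # above (offsets such as 2*$BNSZ) at generation time; the result is then
  # printed to STDOUT, which is already piped through ppc-xlate.pl.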
  1904. $data =~ s/\`([^\`]*)\`/eval $1/gem;
  1905. print $data;
  1906. close STDOUT;