ppc.pl

  1. #! /usr/bin/env perl
  2. # Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the OpenSSL license (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. # Implemented as a Perl wrapper as we want to support several different
  9. # architectures with a single file. We pick the target based on the
  10. # name of the file we are asked to generate.
  11. #
  12. # It should be noted though that this perl code is nothing like
  13. # <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
  14. # as a pre-processor to cover for platform differences in name decoration,
  15. # linker tables, 32-/64-bit instruction sets...
  16. #
  17. # As you might know, there are several PowerPC ABIs in use. Most notably,
  18. # Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
  19. # are similar enough that one can implement leaf(!) functions which are ABI
  20. # neutral. And that's what you find here: ABI-neutral leaf functions
  21. # (a leaf function, in case you wonder, is one that makes no calls of its own).
  22. #
  23. # AIX performance
  24. #
  25. # Measurements with cc on a 200 MHz PowerPC 604e.
  26. #
  27. # The following is the performance of the 32-bit compiler-
  28. # generated code:
  29. #
  30. # OpenSSL 0.9.6c 21 dec 2001
  31. # built on: Tue Jun 11 11:06:51 EDT 2002
  32. # options:bn(64,32) ...
  33. #compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
  34. # sign verify sign/s verify/s
  35. #rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
  36. #rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
  37. #rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
  38. #rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
  39. #dsa 512 bits 0.0087s 0.0106s 114.3 94.5
  40. #dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
  41. #
  42. # Same benchmark with this assembler code:
  43. #
  44. #rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
  45. #rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
  46. #rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
  47. #rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
  48. #dsa 512 bits 0.0052s 0.0062s 191.6 162.0
  49. #dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
  50. #
  51. # The number of operations increases by almost 75%.
  52. #
  53. # Here are performance numbers for 64-bit compiler
  54. # generated code:
  55. #
  56. # OpenSSL 0.9.6g [engine] 9 Aug 2002
  57. # built on: Fri Apr 18 16:59:20 EDT 2003
  58. # options:bn(64,64) ...
  59. # compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
  60. # sign verify sign/s verify/s
  61. #rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
  62. #rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
  63. #rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
  64. #rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
  65. #dsa 512 bits 0.0026s 0.0032s 382.5 313.7
  66. #dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
  67. #
  68. # Same benchmark with this assembler code:
  69. #
  70. #rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
  71. #rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
  72. #rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
  73. #rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
  74. #dsa 512 bits 0.0016s 0.0020s 610.7 507.1
  75. #dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
  76. #
  77. # Again, performance increases by about 75%.
  78. #
  79. # Mac OS X, Apple G5 1.8 GHz (note this is 32-bit code)
  80. # OpenSSL 0.9.7c 30 Sep 2003
  81. #
  82. # Original code.
  83. #
  84. #rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
  85. #rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
  86. #rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
  87. #rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
  88. #dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
  89. #dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
  90. #dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
  91. #
  92. # Same benchmark with this assembler code:
  93. #
  94. #rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
  95. #rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
  96. #rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
  97. #rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
  98. #dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
  99. #dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
  100. #dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
  101. #
  102. # Performance increase of ~60%
  103. #
  104. # If you have comments or suggestions to improve the code, send
  105. # me a note at schari@us.ibm.com
  106. #
  107. $flavour = shift;
  108. if ($flavour =~ /32/) {
  109. $BITS= 32;
  110. $BNSZ= $BITS/8;
  111. $ISA= "\"ppc\"";
  112. $LD= "lwz"; # load
  113. $LDU= "lwzu"; # load and update
  114. $ST= "stw"; # store
  115. $STU= "stwu"; # store and update
  116. $UMULL= "mullw"; # unsigned multiply low
  117. $UMULH= "mulhwu"; # unsigned multiply high
  118. $UDIV= "divwu"; # unsigned divide
  119. $UCMPI= "cmplwi"; # unsigned compare with immediate
  120. $UCMP= "cmplw"; # unsigned compare
  121. $CNTLZ= "cntlzw"; # count leading zeros
  122. $SHL= "slw"; # shift left
  123. $SHR= "srw"; # unsigned shift right
  124. $SHRI= "srwi"; # unsigned shift right by immediate
  125. $SHLI= "slwi"; # shift left by immediate
  126. $CLRU= "clrlwi"; # clear upper bits
  127. $INSR= "insrwi"; # insert right
  128. $ROTL= "rotlwi"; # rotate left by immediate
  129. $TR= "tw"; # conditional trap
  130. } elsif ($flavour =~ /64/) {
  131. $BITS= 64;
  132. $BNSZ= $BITS/8;
  133. $ISA= "\"ppc64\"";
  134. # same as above, but 64-bit mnemonics...
  135. $LD= "ld"; # load
  136. $LDU= "ldu"; # load and update
  137. $ST= "std"; # store
  138. $STU= "stdu"; # store and update
  139. $UMULL= "mulld"; # unsigned multiply low
  140. $UMULH= "mulhdu"; # unsigned multiply high
  141. $UDIV= "divdu"; # unsigned divide
  142. $UCMPI= "cmpldi"; # unsigned compare with immediate
  143. $UCMP= "cmpld"; # unsigned compare
  144. $CNTLZ= "cntlzd"; # count leading zeros
  145. $SHL= "sld"; # shift left
  146. $SHR= "srd"; # unsigned shift right
  147. $SHRI= "srdi"; # unsigned shift right by immediate
  148. $SHLI= "sldi"; # shift left by immediate
  149. $CLRU= "clrldi"; # clear upper bits
  150. $INSR= "insrdi"; # insert right
  151. $ROTL= "rotldi"; # rotate left by immediate
  152. $TR= "td"; # conditional trap
  153. } else { die "nonsense $flavour"; }
  154. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  155. ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
  156. ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
  157. die "can't locate ppc-xlate.pl";
  158. open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
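# For illustration, assuming the 32-bit flavour was selected above: a template
# line from the code below such as
#
#     $LD r5,`0*$BNSZ`(r4)
#
# has $LD and $BNSZ interpolated when the here-document is built, and the
# backtick expression evaluated by the substitution at the end of this file,
# so what is piped into ppc-xlate.pl is "lwz r5,0(r4)"; the 64-bit flavour
# yields "ld r5,0(r4)" for the same line.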
  159. $data=<<EOF;
  160. #--------------------------------------------------------------------
  161. #
  162. #
  163. #
  164. #
  165. # File: ppc32.s
  166. #
  167. # Created by: Suresh Chari
  168. # IBM Thomas J. Watson Research Library
  169. # Hawthorne, NY
  170. #
  171. #
  172. # Description: Optimized assembly routines for OpenSSL crypto
  173. # on the 32-bit PowerPC platform.
  174. #
  175. #
  176. # Version History
  177. #
  178. # 2. Fixed bn_add, bn_sub and bn_div_words, added comments,
  179. # cleaned up the code. Also made a single version which can
  180. # be used with both the AIX and Linux compilers. See NOTE
  181. # below.
  182. # 12/05/03 Suresh Chari
  183. # (with lots of help from) Andy Polyakov
  184. #
  185. # 1. Initial version 10/20/02 Suresh Chari
  186. #
  187. #
  188. # The following file works with the xlc, cc,
  189. # and gcc compilers.
  190. #
  191. # NOTE: To get the file to link correctly with the gcc compiler
  192. # you have to change the names of the routines and remove
  193. # the first .(dot) character. This should automatically
  194. # be done in the build process.
  195. #
  196. # Hand optimized assembly code for the following routines
  197. #
  198. # bn_sqr_comba4
  199. # bn_sqr_comba8
  200. # bn_mul_comba4
  201. # bn_mul_comba8
  202. # bn_sub_words
  203. # bn_add_words
  204. # bn_div_words
  205. # bn_sqr_words
  206. # bn_mul_words
  207. # bn_mul_add_words
  208. #
  209. # NOTE: It is possible to optimize this code more for
  210. # specific PowerPC or Power architectures. On the Northstar
  211. # architecture the optimizations in this file do
  212. # NOT provide much improvement.
  213. #
  214. # If you have comments or suggestions to improve the code, send
  215. # me a note at schari\@us.ibm.com
  216. #
  217. #--------------------------------------------------------------------------
  218. #
  219. # Defines to be used in the assembly code.
  220. #
  221. #.set r0,0 # we use it as storage for value of 0
  222. #.set SP,1 # preserved
  223. #.set RTOC,2 # preserved
  224. #.set r3,3 # 1st argument/return value
  225. #.set r4,4 # 2nd argument/volatile register
  226. #.set r5,5 # 3rd argument/volatile register
  227. #.set r6,6 # ...
  228. #.set r7,7
  229. #.set r8,8
  230. #.set r9,9
  231. #.set r10,10
  232. #.set r11,11
  233. #.set r12,12
  234. #.set r13,13 # not used, nor any other "below" it...
  235. # Declare function names to be global
  236. # NOTE: For gcc these names MUST be changed to remove
  237. # the first . i.e. for example change ".bn_sqr_comba4"
  238. # to "bn_sqr_comba4". This should be automatically done
  239. # in the build.
  240. .globl .bn_sqr_comba4
  241. .globl .bn_sqr_comba8
  242. .globl .bn_mul_comba4
  243. .globl .bn_mul_comba8
  244. .globl .bn_sub_words
  245. .globl .bn_add_words
  246. .globl .bn_div_words
  247. .globl .bn_sqr_words
  248. .globl .bn_mul_words
  249. .globl .bn_mul_add_words
  250. # .text section
  251. .machine "any"
  252. #
  253. # NOTE: The following label name should be changed to
  254. # "bn_sqr_comba4" i.e. remove the first dot
  255. # for the gcc compiler. This should be automatically
  256. # done in the build
  257. #
  258. .align 4
  259. .bn_sqr_comba4:
  260. #
  261. # Optimized version of bn_sqr_comba4.
  262. #
  263. # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
  264. # r3 contains r
  265. # r4 contains a
  266. #
  267. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  268. #
  269. # r5,r6 are the two BN_ULONGs being multiplied.
  270. # r7,r8 hold the two halves of the 32x32->64-bit multiply result.
  271. # r9, r10, r11 are the equivalents of c1, c2, c3.
  272. # Here's the assembly:
  273. #
  274. #
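#
# As a rough illustration only (assuming the 32-bit flavour, i.e. BN_ULONG
# is a 32-bit word): each sqr_add_c(a,i,c1,c2,c3) step named in the
# comments below accumulates the 64-bit product a[i]*a[i] into the running
# three-word column sum, roughly as in the following C sketch (the _ref
# suffix marks it as a reference sketch, not OpenSSL's actual macro):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   static void sqr_add_c_ref(const BN_ULONG *a, int i,
#                             BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
#   {
#       uint64_t p = (uint64_t)a[i] * a[i];          /* UMULL/UMULH pair */
#       uint64_t s = (uint64_t)*c1 + (uint32_t)p;    /* addc             */
#       *c1 = (BN_ULONG)s;
#       s = (uint64_t)*c2 + (uint32_t)(p >> 32) + (s >> 32);   /* adde   */
#       *c2 = (BN_ULONG)s;
#       *c3 += (BN_ULONG)(s >> 32);                  /* addze            */
#   }
#
# sqr_add_c2(a,i,j,...) accumulates 2*a[i]*a[j] instead; the doubling is
# the "addc r7,r7,r7 / adde r8,r8,r8" pair below, and its carry is folded
# into the third word as well.
#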
  275. xor r0,r0,r0 # set r0 = 0. Used in the addze
  276. # instructions below
  277. #sqr_add_c(a,0,c1,c2,c3)
  278. $LD r5,`0*$BNSZ`(r4)
  279. $UMULL r9,r5,r5
  280. $UMULH r10,r5,r5 #in first iteration. No need
  281. #to add since c1=c2=c3=0.
  282. # Note c3(r11) is NOT set to 0
  283. # but will be.
  284. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  285. # sqr_add_c2(a,1,0,c2,c3,c1);
  286. $LD r6,`1*$BNSZ`(r4)
  287. $UMULL r7,r5,r6
  288. $UMULH r8,r5,r6
  289. addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
  290. adde r8,r8,r8
  291. addze r9,r0 # catch carry if any.
  292. # r9= r0(=0) and carry
  293. addc r10,r7,r10 # now add to temp result.
  294. addze r11,r8 # r8 added to r11 which is 0
  295. addze r9,r9
  296. $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
  297. #sqr_add_c(a,1,c3,c1,c2)
  298. $UMULL r7,r6,r6
  299. $UMULH r8,r6,r6
  300. addc r11,r7,r11
  301. adde r9,r8,r9
  302. addze r10,r0
  303. #sqr_add_c2(a,2,0,c3,c1,c2)
  304. $LD r6,`2*$BNSZ`(r4)
  305. $UMULL r7,r5,r6
  306. $UMULH r8,r5,r6
  307. addc r7,r7,r7
  308. adde r8,r8,r8
  309. addze r10,r10
  310. addc r11,r7,r11
  311. adde r9,r8,r9
  312. addze r10,r10
  313. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  314. #sqr_add_c2(a,3,0,c1,c2,c3);
  315. $LD r6,`3*$BNSZ`(r4)
  316. $UMULL r7,r5,r6
  317. $UMULH r8,r5,r6
  318. addc r7,r7,r7
  319. adde r8,r8,r8
  320. addze r11,r0
  321. addc r9,r7,r9
  322. adde r10,r8,r10
  323. addze r11,r11
  324. #sqr_add_c2(a,2,1,c1,c2,c3);
  325. $LD r5,`1*$BNSZ`(r4)
  326. $LD r6,`2*$BNSZ`(r4)
  327. $UMULL r7,r5,r6
  328. $UMULH r8,r5,r6
  329. addc r7,r7,r7
  330. adde r8,r8,r8
  331. addze r11,r11
  332. addc r9,r7,r9
  333. adde r10,r8,r10
  334. addze r11,r11
  335. $ST r9,`3*$BNSZ`(r3) #r[3]=c1
  336. #sqr_add_c(a,2,c2,c3,c1);
  337. $UMULL r7,r6,r6
  338. $UMULH r8,r6,r6
  339. addc r10,r7,r10
  340. adde r11,r8,r11
  341. addze r9,r0
  342. #sqr_add_c2(a,3,1,c2,c3,c1);
  343. $LD r6,`3*$BNSZ`(r4)
  344. $UMULL r7,r5,r6
  345. $UMULH r8,r5,r6
  346. addc r7,r7,r7
  347. adde r8,r8,r8
  348. addze r9,r9
  349. addc r10,r7,r10
  350. adde r11,r8,r11
  351. addze r9,r9
  352. $ST r10,`4*$BNSZ`(r3) #r[4]=c2
  353. #sqr_add_c2(a,3,2,c3,c1,c2);
  354. $LD r5,`2*$BNSZ`(r4)
  355. $UMULL r7,r5,r6
  356. $UMULH r8,r5,r6
  357. addc r7,r7,r7
  358. adde r8,r8,r8
  359. addze r10,r0
  360. addc r11,r7,r11
  361. adde r9,r8,r9
  362. addze r10,r10
  363. $ST r11,`5*$BNSZ`(r3) #r[5] = c3
  364. #sqr_add_c(a,3,c1,c2,c3);
  365. $UMULL r7,r6,r6
  366. $UMULH r8,r6,r6
  367. addc r9,r7,r9
  368. adde r10,r8,r10
  369. $ST r9,`6*$BNSZ`(r3) #r[6]=c1
  370. $ST r10,`7*$BNSZ`(r3) #r[7]=c2
  371. blr
  372. .long 0
  373. .byte 0,12,0x14,0,0,0,2,0
  374. .long 0
  375. .size .bn_sqr_comba4,.-.bn_sqr_comba4
  376. #
  377. # NOTE: The following label name should be changed to
  378. # "bn_sqr_comba8" i.e. remove the first dot
  379. # for the gcc compiler. This should be automatically
  380. # done in the build
  381. #
  382. .align 4
  383. .bn_sqr_comba8:
  384. #
  385. # This is an optimized version of the bn_sqr_comba8 routine.
  386. # Tightly uses the adde instruction
  387. #
  388. #
  389. # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
  390. # r3 contains r
  391. # r4 contains a
  392. #
  393. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  394. #
  395. # r5,r6 are the two BN_ULONGs being multiplied.
  396. # r7,r8 hold the two halves of the 32x32->64-bit multiply result.
  397. # r9, r10, r11 are the equivalents of c1, c2, c3.
  398. #
  399. # A possible optimization of loading all 8 longs of a into registers
  400. # doesn't provide any speedup.
  401. #
  402. xor r0,r0,r0 #set r0 = 0.Used in addze
  403. #instructions below.
  404. #sqr_add_c(a,0,c1,c2,c3);
  405. $LD r5,`0*$BNSZ`(r4)
  406. $UMULL r9,r5,r5 #1st iteration: no carries.
  407. $UMULH r10,r5,r5
  408. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  409. #sqr_add_c2(a,1,0,c2,c3,c1);
  410. $LD r6,`1*$BNSZ`(r4)
  411. $UMULL r7,r5,r6
  412. $UMULH r8,r5,r6
  413. addc r10,r7,r10 #add the two register number
  414. adde r11,r8,r0 # (r8,r7) to the three register
  415. addze r9,r0 # number (r9,r11,r10).NOTE:r0=0
  416. addc r10,r7,r10 #add the two register number
  417. adde r11,r8,r11 # (r8,r7) to the three register
  418. addze r9,r9 # number (r9,r11,r10).
  419. $ST r10,`1*$BNSZ`(r3) # r[1]=c2
  420. #sqr_add_c(a,1,c3,c1,c2);
  421. $UMULL r7,r6,r6
  422. $UMULH r8,r6,r6
  423. addc r11,r7,r11
  424. adde r9,r8,r9
  425. addze r10,r0
  426. #sqr_add_c2(a,2,0,c3,c1,c2);
  427. $LD r6,`2*$BNSZ`(r4)
  428. $UMULL r7,r5,r6
  429. $UMULH r8,r5,r6
  430. addc r11,r7,r11
  431. adde r9,r8,r9
  432. addze r10,r10
  433. addc r11,r7,r11
  434. adde r9,r8,r9
  435. addze r10,r10
  436. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  437. #sqr_add_c2(a,3,0,c1,c2,c3);
  438. $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
  439. $UMULL r7,r5,r6
  440. $UMULH r8,r5,r6
  441. addc r9,r7,r9
  442. adde r10,r8,r10
  443. addze r11,r0
  444. addc r9,r7,r9
  445. adde r10,r8,r10
  446. addze r11,r11
  447. #sqr_add_c2(a,2,1,c1,c2,c3);
  448. $LD r5,`1*$BNSZ`(r4)
  449. $LD r6,`2*$BNSZ`(r4)
  450. $UMULL r7,r5,r6
  451. $UMULH r8,r5,r6
  452. addc r9,r7,r9
  453. adde r10,r8,r10
  454. addze r11,r11
  455. addc r9,r7,r9
  456. adde r10,r8,r10
  457. addze r11,r11
  458. $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
  459. #sqr_add_c(a,2,c2,c3,c1);
  460. $UMULL r7,r6,r6
  461. $UMULH r8,r6,r6
  462. addc r10,r7,r10
  463. adde r11,r8,r11
  464. addze r9,r0
  465. #sqr_add_c2(a,3,1,c2,c3,c1);
  466. $LD r6,`3*$BNSZ`(r4)
  467. $UMULL r7,r5,r6
  468. $UMULH r8,r5,r6
  469. addc r10,r7,r10
  470. adde r11,r8,r11
  471. addze r9,r9
  472. addc r10,r7,r10
  473. adde r11,r8,r11
  474. addze r9,r9
  475. #sqr_add_c2(a,4,0,c2,c3,c1);
  476. $LD r5,`0*$BNSZ`(r4)
  477. $LD r6,`4*$BNSZ`(r4)
  478. $UMULL r7,r5,r6
  479. $UMULH r8,r5,r6
  480. addc r10,r7,r10
  481. adde r11,r8,r11
  482. addze r9,r9
  483. addc r10,r7,r10
  484. adde r11,r8,r11
  485. addze r9,r9
  486. $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
  487. #sqr_add_c2(a,5,0,c3,c1,c2);
  488. $LD r6,`5*$BNSZ`(r4)
  489. $UMULL r7,r5,r6
  490. $UMULH r8,r5,r6
  491. addc r11,r7,r11
  492. adde r9,r8,r9
  493. addze r10,r0
  494. addc r11,r7,r11
  495. adde r9,r8,r9
  496. addze r10,r10
  497. #sqr_add_c2(a,4,1,c3,c1,c2);
  498. $LD r5,`1*$BNSZ`(r4)
  499. $LD r6,`4*$BNSZ`(r4)
  500. $UMULL r7,r5,r6
  501. $UMULH r8,r5,r6
  502. addc r11,r7,r11
  503. adde r9,r8,r9
  504. addze r10,r10
  505. addc r11,r7,r11
  506. adde r9,r8,r9
  507. addze r10,r10
  508. #sqr_add_c2(a,3,2,c3,c1,c2);
  509. $LD r5,`2*$BNSZ`(r4)
  510. $LD r6,`3*$BNSZ`(r4)
  511. $UMULL r7,r5,r6
  512. $UMULH r8,r5,r6
  513. addc r11,r7,r11
  514. adde r9,r8,r9
  515. addze r10,r10
  516. addc r11,r7,r11
  517. adde r9,r8,r9
  518. addze r10,r10
  519. $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
  520. #sqr_add_c(a,3,c1,c2,c3);
  521. $UMULL r7,r6,r6
  522. $UMULH r8,r6,r6
  523. addc r9,r7,r9
  524. adde r10,r8,r10
  525. addze r11,r0
  526. #sqr_add_c2(a,4,2,c1,c2,c3);
  527. $LD r6,`4*$BNSZ`(r4)
  528. $UMULL r7,r5,r6
  529. $UMULH r8,r5,r6
  530. addc r9,r7,r9
  531. adde r10,r8,r10
  532. addze r11,r11
  533. addc r9,r7,r9
  534. adde r10,r8,r10
  535. addze r11,r11
  536. #sqr_add_c2(a,5,1,c1,c2,c3);
  537. $LD r5,`1*$BNSZ`(r4)
  538. $LD r6,`5*$BNSZ`(r4)
  539. $UMULL r7,r5,r6
  540. $UMULH r8,r5,r6
  541. addc r9,r7,r9
  542. adde r10,r8,r10
  543. addze r11,r11
  544. addc r9,r7,r9
  545. adde r10,r8,r10
  546. addze r11,r11
  547. #sqr_add_c2(a,6,0,c1,c2,c3);
  548. $LD r5,`0*$BNSZ`(r4)
  549. $LD r6,`6*$BNSZ`(r4)
  550. $UMULL r7,r5,r6
  551. $UMULH r8,r5,r6
  552. addc r9,r7,r9
  553. adde r10,r8,r10
  554. addze r11,r11
  555. addc r9,r7,r9
  556. adde r10,r8,r10
  557. addze r11,r11
  558. $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
  559. #sqr_add_c2(a,7,0,c2,c3,c1);
  560. $LD r6,`7*$BNSZ`(r4)
  561. $UMULL r7,r5,r6
  562. $UMULH r8,r5,r6
  563. addc r10,r7,r10
  564. adde r11,r8,r11
  565. addze r9,r0
  566. addc r10,r7,r10
  567. adde r11,r8,r11
  568. addze r9,r9
  569. #sqr_add_c2(a,6,1,c2,c3,c1);
  570. $LD r5,`1*$BNSZ`(r4)
  571. $LD r6,`6*$BNSZ`(r4)
  572. $UMULL r7,r5,r6
  573. $UMULH r8,r5,r6
  574. addc r10,r7,r10
  575. adde r11,r8,r11
  576. addze r9,r9
  577. addc r10,r7,r10
  578. adde r11,r8,r11
  579. addze r9,r9
  580. #sqr_add_c2(a,5,2,c2,c3,c1);
  581. $LD r5,`2*$BNSZ`(r4)
  582. $LD r6,`5*$BNSZ`(r4)
  583. $UMULL r7,r5,r6
  584. $UMULH r8,r5,r6
  585. addc r10,r7,r10
  586. adde r11,r8,r11
  587. addze r9,r9
  588. addc r10,r7,r10
  589. adde r11,r8,r11
  590. addze r9,r9
  591. #sqr_add_c2(a,4,3,c2,c3,c1);
  592. $LD r5,`3*$BNSZ`(r4)
  593. $LD r6,`4*$BNSZ`(r4)
  594. $UMULL r7,r5,r6
  595. $UMULH r8,r5,r6
  596. addc r10,r7,r10
  597. adde r11,r8,r11
  598. addze r9,r9
  599. addc r10,r7,r10
  600. adde r11,r8,r11
  601. addze r9,r9
  602. $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
  603. #sqr_add_c(a,4,c3,c1,c2);
  604. $UMULL r7,r6,r6
  605. $UMULH r8,r6,r6
  606. addc r11,r7,r11
  607. adde r9,r8,r9
  608. addze r10,r0
  609. #sqr_add_c2(a,5,3,c3,c1,c2);
  610. $LD r6,`5*$BNSZ`(r4)
  611. $UMULL r7,r5,r6
  612. $UMULH r8,r5,r6
  613. addc r11,r7,r11
  614. adde r9,r8,r9
  615. addze r10,r10
  616. addc r11,r7,r11
  617. adde r9,r8,r9
  618. addze r10,r10
  619. #sqr_add_c2(a,6,2,c3,c1,c2);
  620. $LD r5,`2*$BNSZ`(r4)
  621. $LD r6,`6*$BNSZ`(r4)
  622. $UMULL r7,r5,r6
  623. $UMULH r8,r5,r6
  624. addc r11,r7,r11
  625. adde r9,r8,r9
  626. addze r10,r10
  627. addc r11,r7,r11
  628. adde r9,r8,r9
  629. addze r10,r10
  630. #sqr_add_c2(a,7,1,c3,c1,c2);
  631. $LD r5,`1*$BNSZ`(r4)
  632. $LD r6,`7*$BNSZ`(r4)
  633. $UMULL r7,r5,r6
  634. $UMULH r8,r5,r6
  635. addc r11,r7,r11
  636. adde r9,r8,r9
  637. addze r10,r10
  638. addc r11,r7,r11
  639. adde r9,r8,r9
  640. addze r10,r10
  641. $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
  642. #sqr_add_c2(a,7,2,c1,c2,c3);
  643. $LD r5,`2*$BNSZ`(r4)
  644. $UMULL r7,r5,r6
  645. $UMULH r8,r5,r6
  646. addc r9,r7,r9
  647. adde r10,r8,r10
  648. addze r11,r0
  649. addc r9,r7,r9
  650. adde r10,r8,r10
  651. addze r11,r11
  652. #sqr_add_c2(a,6,3,c1,c2,c3);
  653. $LD r5,`3*$BNSZ`(r4)
  654. $LD r6,`6*$BNSZ`(r4)
  655. $UMULL r7,r5,r6
  656. $UMULH r8,r5,r6
  657. addc r9,r7,r9
  658. adde r10,r8,r10
  659. addze r11,r11
  660. addc r9,r7,r9
  661. adde r10,r8,r10
  662. addze r11,r11
  663. #sqr_add_c2(a,5,4,c1,c2,c3);
  664. $LD r5,`4*$BNSZ`(r4)
  665. $LD r6,`5*$BNSZ`(r4)
  666. $UMULL r7,r5,r6
  667. $UMULH r8,r5,r6
  668. addc r9,r7,r9
  669. adde r10,r8,r10
  670. addze r11,r11
  671. addc r9,r7,r9
  672. adde r10,r8,r10
  673. addze r11,r11
  674. $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
  675. #sqr_add_c(a,5,c2,c3,c1);
  676. $UMULL r7,r6,r6
  677. $UMULH r8,r6,r6
  678. addc r10,r7,r10
  679. adde r11,r8,r11
  680. addze r9,r0
  681. #sqr_add_c2(a,6,4,c2,c3,c1);
  682. $LD r6,`6*$BNSZ`(r4)
  683. $UMULL r7,r5,r6
  684. $UMULH r8,r5,r6
  685. addc r10,r7,r10
  686. adde r11,r8,r11
  687. addze r9,r9
  688. addc r10,r7,r10
  689. adde r11,r8,r11
  690. addze r9,r9
  691. #sqr_add_c2(a,7,3,c2,c3,c1);
  692. $LD r5,`3*$BNSZ`(r4)
  693. $LD r6,`7*$BNSZ`(r4)
  694. $UMULL r7,r5,r6
  695. $UMULH r8,r5,r6
  696. addc r10,r7,r10
  697. adde r11,r8,r11
  698. addze r9,r9
  699. addc r10,r7,r10
  700. adde r11,r8,r11
  701. addze r9,r9
  702. $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
  703. #sqr_add_c2(a,7,4,c3,c1,c2);
  704. $LD r5,`4*$BNSZ`(r4)
  705. $UMULL r7,r5,r6
  706. $UMULH r8,r5,r6
  707. addc r11,r7,r11
  708. adde r9,r8,r9
  709. addze r10,r0
  710. addc r11,r7,r11
  711. adde r9,r8,r9
  712. addze r10,r10
  713. #sqr_add_c2(a,6,5,c3,c1,c2);
  714. $LD r5,`5*$BNSZ`(r4)
  715. $LD r6,`6*$BNSZ`(r4)
  716. $UMULL r7,r5,r6
  717. $UMULH r8,r5,r6
  718. addc r11,r7,r11
  719. adde r9,r8,r9
  720. addze r10,r10
  721. addc r11,r7,r11
  722. adde r9,r8,r9
  723. addze r10,r10
  724. $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
  725. #sqr_add_c(a,6,c1,c2,c3);
  726. $UMULL r7,r6,r6
  727. $UMULH r8,r6,r6
  728. addc r9,r7,r9
  729. adde r10,r8,r10
  730. addze r11,r0
  731. #sqr_add_c2(a,7,5,c1,c2,c3)
  732. $LD r6,`7*$BNSZ`(r4)
  733. $UMULL r7,r5,r6
  734. $UMULH r8,r5,r6
  735. addc r9,r7,r9
  736. adde r10,r8,r10
  737. addze r11,r11
  738. addc r9,r7,r9
  739. adde r10,r8,r10
  740. addze r11,r11
  741. $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
  742. #sqr_add_c2(a,7,6,c2,c3,c1)
  743. $LD r5,`6*$BNSZ`(r4)
  744. $UMULL r7,r5,r6
  745. $UMULH r8,r5,r6
  746. addc r10,r7,r10
  747. adde r11,r8,r11
  748. addze r9,r0
  749. addc r10,r7,r10
  750. adde r11,r8,r11
  751. addze r9,r9
  752. $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
  753. #sqr_add_c(a,7,c3,c1,c2);
  754. $UMULL r7,r6,r6
  755. $UMULH r8,r6,r6
  756. addc r11,r7,r11
  757. adde r9,r8,r9
  758. $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
  759. $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
  760. blr
  761. .long 0
  762. .byte 0,12,0x14,0,0,0,2,0
  763. .long 0
  764. .size .bn_sqr_comba8,.-.bn_sqr_comba8
  765. #
  766. # NOTE: The following label name should be changed to
  767. # "bn_mul_comba4" i.e. remove the first dot
  768. # for the gcc compiler. This should be automatically
  769. # done in the build
  770. #
  771. .align 4
  772. .bn_mul_comba4:
  773. #
  774. # This is an optimized version of the bn_mul_comba4 routine.
  775. #
  776. # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  777. # r3 contains r
  778. # r4 contains a
  779. # r5 contains b
  780. # r6, r7 are the 2 BN_ULONGs being multiplied.
  781. # r8, r9 hold the two halves of the 32x32->64-bit multiply result.
  782. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  783. #
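#
# As with the squaring routines, each mul_add_c(a[i],b[j],c1,c2,c3) step
# named below adds one 64-bit product into the running three-word column
# sum. A rough C sketch (32-bit flavour assumed; reference only, not
# OpenSSL's actual macro):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   static void mul_add_c_ref(BN_ULONG a, BN_ULONG b,
#                             BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
#   {
#       uint64_t p = (uint64_t)a * b;
#       uint64_t s = (uint64_t)*c1 + (uint32_t)p;               /* addc  */
#       *c1 = (BN_ULONG)s;
#       s = (uint64_t)*c2 + (uint32_t)(p >> 32) + (s >> 32);    /* adde  */
#       *c2 = (BN_ULONG)s;
#       *c3 += (BN_ULONG)(s >> 32);                              /* addze */
#   }
#
# The rotating argument order (c1,c2,c3 / c2,c3,c1 / c3,c1,c2) in the
# comments mirrors how r10, r11 and r12 take turns holding the column
# being completed.
#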
  784. xor r0,r0,r0 #r0=0. Used in addze below.
  785. #mul_add_c(a[0],b[0],c1,c2,c3);
  786. $LD r6,`0*$BNSZ`(r4)
  787. $LD r7,`0*$BNSZ`(r5)
  788. $UMULL r10,r6,r7
  789. $UMULH r11,r6,r7
  790. $ST r10,`0*$BNSZ`(r3) #r[0]=c1
  791. #mul_add_c(a[0],b[1],c2,c3,c1);
  792. $LD r7,`1*$BNSZ`(r5)
  793. $UMULL r8,r6,r7
  794. $UMULH r9,r6,r7
  795. addc r11,r8,r11
  796. adde r12,r9,r0
  797. addze r10,r0
  798. #mul_add_c(a[1],b[0],c2,c3,c1);
  799. $LD r6, `1*$BNSZ`(r4)
  800. $LD r7, `0*$BNSZ`(r5)
  801. $UMULL r8,r6,r7
  802. $UMULH r9,r6,r7
  803. addc r11,r8,r11
  804. adde r12,r9,r12
  805. addze r10,r10
  806. $ST r11,`1*$BNSZ`(r3) #r[1]=c2
  807. #mul_add_c(a[2],b[0],c3,c1,c2);
  808. $LD r6,`2*$BNSZ`(r4)
  809. $UMULL r8,r6,r7
  810. $UMULH r9,r6,r7
  811. addc r12,r8,r12
  812. adde r10,r9,r10
  813. addze r11,r0
  814. #mul_add_c(a[1],b[1],c3,c1,c2);
  815. $LD r6,`1*$BNSZ`(r4)
  816. $LD r7,`1*$BNSZ`(r5)
  817. $UMULL r8,r6,r7
  818. $UMULH r9,r6,r7
  819. addc r12,r8,r12
  820. adde r10,r9,r10
  821. addze r11,r11
  822. #mul_add_c(a[0],b[2],c3,c1,c2);
  823. $LD r6,`0*$BNSZ`(r4)
  824. $LD r7,`2*$BNSZ`(r5)
  825. $UMULL r8,r6,r7
  826. $UMULH r9,r6,r7
  827. addc r12,r8,r12
  828. adde r10,r9,r10
  829. addze r11,r11
  830. $ST r12,`2*$BNSZ`(r3) #r[2]=c3
  831. #mul_add_c(a[0],b[3],c1,c2,c3);
  832. $LD r7,`3*$BNSZ`(r5)
  833. $UMULL r8,r6,r7
  834. $UMULH r9,r6,r7
  835. addc r10,r8,r10
  836. adde r11,r9,r11
  837. addze r12,r0
  838. #mul_add_c(a[1],b[2],c1,c2,c3);
  839. $LD r6,`1*$BNSZ`(r4)
  840. $LD r7,`2*$BNSZ`(r5)
  841. $UMULL r8,r6,r7
  842. $UMULH r9,r6,r7
  843. addc r10,r8,r10
  844. adde r11,r9,r11
  845. addze r12,r12
  846. #mul_add_c(a[2],b[1],c1,c2,c3);
  847. $LD r6,`2*$BNSZ`(r4)
  848. $LD r7,`1*$BNSZ`(r5)
  849. $UMULL r8,r6,r7
  850. $UMULH r9,r6,r7
  851. addc r10,r8,r10
  852. adde r11,r9,r11
  853. addze r12,r12
  854. #mul_add_c(a[3],b[0],c1,c2,c3);
  855. $LD r6,`3*$BNSZ`(r4)
  856. $LD r7,`0*$BNSZ`(r5)
  857. $UMULL r8,r6,r7
  858. $UMULH r9,r6,r7
  859. addc r10,r8,r10
  860. adde r11,r9,r11
  861. addze r12,r12
  862. $ST r10,`3*$BNSZ`(r3) #r[3]=c1
  863. #mul_add_c(a[3],b[1],c2,c3,c1);
  864. $LD r7,`1*$BNSZ`(r5)
  865. $UMULL r8,r6,r7
  866. $UMULH r9,r6,r7
  867. addc r11,r8,r11
  868. adde r12,r9,r12
  869. addze r10,r0
  870. #mul_add_c(a[2],b[2],c2,c3,c1);
  871. $LD r6,`2*$BNSZ`(r4)
  872. $LD r7,`2*$BNSZ`(r5)
  873. $UMULL r8,r6,r7
  874. $UMULH r9,r6,r7
  875. addc r11,r8,r11
  876. adde r12,r9,r12
  877. addze r10,r10
  878. #mul_add_c(a[1],b[3],c2,c3,c1);
  879. $LD r6,`1*$BNSZ`(r4)
  880. $LD r7,`3*$BNSZ`(r5)
  881. $UMULL r8,r6,r7
  882. $UMULH r9,r6,r7
  883. addc r11,r8,r11
  884. adde r12,r9,r12
  885. addze r10,r10
  886. $ST r11,`4*$BNSZ`(r3) #r[4]=c2
  887. #mul_add_c(a[2],b[3],c3,c1,c2);
  888. $LD r6,`2*$BNSZ`(r4)
  889. $UMULL r8,r6,r7
  890. $UMULH r9,r6,r7
  891. addc r12,r8,r12
  892. adde r10,r9,r10
  893. addze r11,r0
  894. #mul_add_c(a[3],b[2],c3,c1,c2);
  895. $LD r6,`3*$BNSZ`(r4)
  896. $LD r7,`2*$BNSZ`(r5)
  897. $UMULL r8,r6,r7
  898. $UMULH r9,r6,r7
  899. addc r12,r8,r12
  900. adde r10,r9,r10
  901. addze r11,r11
  902. $ST r12,`5*$BNSZ`(r3) #r[5]=c3
  903. #mul_add_c(a[3],b[3],c1,c2,c3);
  904. $LD r7,`3*$BNSZ`(r5)
  905. $UMULL r8,r6,r7
  906. $UMULH r9,r6,r7
  907. addc r10,r8,r10
  908. adde r11,r9,r11
  909. $ST r10,`6*$BNSZ`(r3) #r[6]=c1
  910. $ST r11,`7*$BNSZ`(r3) #r[7]=c2
  911. blr
  912. .long 0
  913. .byte 0,12,0x14,0,0,0,3,0
  914. .long 0
  915. .size .bn_mul_comba4,.-.bn_mul_comba4
  916. #
  917. # NOTE: The following label name should be changed to
  918. # "bn_mul_comba8" i.e. remove the first dot
  919. # for the gcc compiler. This should be automatically
  920. # done in the build
  921. #
  922. .align 4
  923. .bn_mul_comba8:
  924. #
  925. # Optimized version of the bn_mul_comba8 routine.
  926. #
  927. # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  928. # r3 contains r
  929. # r4 contains a
  930. # r5 contains b
  931. # r6, r7 are the 2 BN_ULONGs being multiplied.
  932. # r8, r9 hold the two halves of the 32x32->64-bit multiply result.
  933. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  934. #
  935. xor r0,r0,r0 #r0=0. Used in addze below.
  936. #mul_add_c(a[0],b[0],c1,c2,c3);
  937. $LD r6,`0*$BNSZ`(r4) #a[0]
  938. $LD r7,`0*$BNSZ`(r5) #b[0]
  939. $UMULL r10,r6,r7
  940. $UMULH r11,r6,r7
  941. $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
  942. #mul_add_c(a[0],b[1],c2,c3,c1);
  943. $LD r7,`1*$BNSZ`(r5)
  944. $UMULL r8,r6,r7
  945. $UMULH r9,r6,r7
  946. addc r11,r11,r8
  947. addze r12,r9 # since we didn't set r12 to zero before.
  948. addze r10,r0
  949. #mul_add_c(a[1],b[0],c2,c3,c1);
  950. $LD r6,`1*$BNSZ`(r4)
  951. $LD r7,`0*$BNSZ`(r5)
  952. $UMULL r8,r6,r7
  953. $UMULH r9,r6,r7
  954. addc r11,r11,r8
  955. adde r12,r12,r9
  956. addze r10,r10
  957. $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
  958. #mul_add_c(a[2],b[0],c3,c1,c2);
  959. $LD r6,`2*$BNSZ`(r4)
  960. $UMULL r8,r6,r7
  961. $UMULH r9,r6,r7
  962. addc r12,r12,r8
  963. adde r10,r10,r9
  964. addze r11,r0
  965. #mul_add_c(a[1],b[1],c3,c1,c2);
  966. $LD r6,`1*$BNSZ`(r4)
  967. $LD r7,`1*$BNSZ`(r5)
  968. $UMULL r8,r6,r7
  969. $UMULH r9,r6,r7
  970. addc r12,r12,r8
  971. adde r10,r10,r9
  972. addze r11,r11
  973. #mul_add_c(a[0],b[2],c3,c1,c2);
  974. $LD r6,`0*$BNSZ`(r4)
  975. $LD r7,`2*$BNSZ`(r5)
  976. $UMULL r8,r6,r7
  977. $UMULH r9,r6,r7
  978. addc r12,r12,r8
  979. adde r10,r10,r9
  980. addze r11,r11
  981. $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
  982. #mul_add_c(a[0],b[3],c1,c2,c3);
  983. $LD r7,`3*$BNSZ`(r5)
  984. $UMULL r8,r6,r7
  985. $UMULH r9,r6,r7
  986. addc r10,r10,r8
  987. adde r11,r11,r9
  988. addze r12,r0
  989. #mul_add_c(a[1],b[2],c1,c2,c3);
  990. $LD r6,`1*$BNSZ`(r4)
  991. $LD r7,`2*$BNSZ`(r5)
  992. $UMULL r8,r6,r7
  993. $UMULH r9,r6,r7
  994. addc r10,r10,r8
  995. adde r11,r11,r9
  996. addze r12,r12
  997. #mul_add_c(a[2],b[1],c1,c2,c3);
  998. $LD r6,`2*$BNSZ`(r4)
  999. $LD r7,`1*$BNSZ`(r5)
  1000. $UMULL r8,r6,r7
  1001. $UMULH r9,r6,r7
  1002. addc r10,r10,r8
  1003. adde r11,r11,r9
  1004. addze r12,r12
  1005. #mul_add_c(a[3],b[0],c1,c2,c3);
  1006. $LD r6,`3*$BNSZ`(r4)
  1007. $LD r7,`0*$BNSZ`(r5)
  1008. $UMULL r8,r6,r7
  1009. $UMULH r9,r6,r7
  1010. addc r10,r10,r8
  1011. adde r11,r11,r9
  1012. addze r12,r12
  1013. $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
  1014. #mul_add_c(a[4],b[0],c2,c3,c1);
  1015. $LD r6,`4*$BNSZ`(r4)
  1016. $UMULL r8,r6,r7
  1017. $UMULH r9,r6,r7
  1018. addc r11,r11,r8
  1019. adde r12,r12,r9
  1020. addze r10,r0
  1021. #mul_add_c(a[3],b[1],c2,c3,c1);
  1022. $LD r6,`3*$BNSZ`(r4)
  1023. $LD r7,`1*$BNSZ`(r5)
  1024. $UMULL r8,r6,r7
  1025. $UMULH r9,r6,r7
  1026. addc r11,r11,r8
  1027. adde r12,r12,r9
  1028. addze r10,r10
  1029. #mul_add_c(a[2],b[2],c2,c3,c1);
  1030. $LD r6,`2*$BNSZ`(r4)
  1031. $LD r7,`2*$BNSZ`(r5)
  1032. $UMULL r8,r6,r7
  1033. $UMULH r9,r6,r7
  1034. addc r11,r11,r8
  1035. adde r12,r12,r9
  1036. addze r10,r10
  1037. #mul_add_c(a[1],b[3],c2,c3,c1);
  1038. $LD r6,`1*$BNSZ`(r4)
  1039. $LD r7,`3*$BNSZ`(r5)
  1040. $UMULL r8,r6,r7
  1041. $UMULH r9,r6,r7
  1042. addc r11,r11,r8
  1043. adde r12,r12,r9
  1044. addze r10,r10
  1045. #mul_add_c(a[0],b[4],c2,c3,c1);
  1046. $LD r6,`0*$BNSZ`(r4)
  1047. $LD r7,`4*$BNSZ`(r5)
  1048. $UMULL r8,r6,r7
  1049. $UMULH r9,r6,r7
  1050. addc r11,r11,r8
  1051. adde r12,r12,r9
  1052. addze r10,r10
  1053. $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
  1054. #mul_add_c(a[0],b[5],c3,c1,c2);
  1055. $LD r7,`5*$BNSZ`(r5)
  1056. $UMULL r8,r6,r7
  1057. $UMULH r9,r6,r7
  1058. addc r12,r12,r8
  1059. adde r10,r10,r9
  1060. addze r11,r0
  1061. #mul_add_c(a[1],b[4],c3,c1,c2);
  1062. $LD r6,`1*$BNSZ`(r4)
  1063. $LD r7,`4*$BNSZ`(r5)
  1064. $UMULL r8,r6,r7
  1065. $UMULH r9,r6,r7
  1066. addc r12,r12,r8
  1067. adde r10,r10,r9
  1068. addze r11,r11
  1069. #mul_add_c(a[2],b[3],c3,c1,c2);
  1070. $LD r6,`2*$BNSZ`(r4)
  1071. $LD r7,`3*$BNSZ`(r5)
  1072. $UMULL r8,r6,r7
  1073. $UMULH r9,r6,r7
  1074. addc r12,r12,r8
  1075. adde r10,r10,r9
  1076. addze r11,r11
  1077. #mul_add_c(a[3],b[2],c3,c1,c2);
  1078. $LD r6,`3*$BNSZ`(r4)
  1079. $LD r7,`2*$BNSZ`(r5)
  1080. $UMULL r8,r6,r7
  1081. $UMULH r9,r6,r7
  1082. addc r12,r12,r8
  1083. adde r10,r10,r9
  1084. addze r11,r11
  1085. #mul_add_c(a[4],b[1],c3,c1,c2);
  1086. $LD r6,`4*$BNSZ`(r4)
  1087. $LD r7,`1*$BNSZ`(r5)
  1088. $UMULL r8,r6,r7
  1089. $UMULH r9,r6,r7
  1090. addc r12,r12,r8
  1091. adde r10,r10,r9
  1092. addze r11,r11
  1093. #mul_add_c(a[5],b[0],c3,c1,c2);
  1094. $LD r6,`5*$BNSZ`(r4)
  1095. $LD r7,`0*$BNSZ`(r5)
  1096. $UMULL r8,r6,r7
  1097. $UMULH r9,r6,r7
  1098. addc r12,r12,r8
  1099. adde r10,r10,r9
  1100. addze r11,r11
  1101. $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
  1102. #mul_add_c(a[6],b[0],c1,c2,c3);
  1103. $LD r6,`6*$BNSZ`(r4)
  1104. $UMULL r8,r6,r7
  1105. $UMULH r9,r6,r7
  1106. addc r10,r10,r8
  1107. adde r11,r11,r9
  1108. addze r12,r0
  1109. #mul_add_c(a[5],b[1],c1,c2,c3);
  1110. $LD r6,`5*$BNSZ`(r4)
  1111. $LD r7,`1*$BNSZ`(r5)
  1112. $UMULL r8,r6,r7
  1113. $UMULH r9,r6,r7
  1114. addc r10,r10,r8
  1115. adde r11,r11,r9
  1116. addze r12,r12
  1117. #mul_add_c(a[4],b[2],c1,c2,c3);
  1118. $LD r6,`4*$BNSZ`(r4)
  1119. $LD r7,`2*$BNSZ`(r5)
  1120. $UMULL r8,r6,r7
  1121. $UMULH r9,r6,r7
  1122. addc r10,r10,r8
  1123. adde r11,r11,r9
  1124. addze r12,r12
  1125. #mul_add_c(a[3],b[3],c1,c2,c3);
  1126. $LD r6,`3*$BNSZ`(r4)
  1127. $LD r7,`3*$BNSZ`(r5)
  1128. $UMULL r8,r6,r7
  1129. $UMULH r9,r6,r7
  1130. addc r10,r10,r8
  1131. adde r11,r11,r9
  1132. addze r12,r12
  1133. #mul_add_c(a[2],b[4],c1,c2,c3);
  1134. $LD r6,`2*$BNSZ`(r4)
  1135. $LD r7,`4*$BNSZ`(r5)
  1136. $UMULL r8,r6,r7
  1137. $UMULH r9,r6,r7
  1138. addc r10,r10,r8
  1139. adde r11,r11,r9
  1140. addze r12,r12
  1141. #mul_add_c(a[1],b[5],c1,c2,c3);
  1142. $LD r6,`1*$BNSZ`(r4)
  1143. $LD r7,`5*$BNSZ`(r5)
  1144. $UMULL r8,r6,r7
  1145. $UMULH r9,r6,r7
  1146. addc r10,r10,r8
  1147. adde r11,r11,r9
  1148. addze r12,r12
  1149. #mul_add_c(a[0],b[6],c1,c2,c3);
  1150. $LD r6,`0*$BNSZ`(r4)
  1151. $LD r7,`6*$BNSZ`(r5)
  1152. $UMULL r8,r6,r7
  1153. $UMULH r9,r6,r7
  1154. addc r10,r10,r8
  1155. adde r11,r11,r9
  1156. addze r12,r12
  1157. $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
  1158. #mul_add_c(a[0],b[7],c2,c3,c1);
  1159. $LD r7,`7*$BNSZ`(r5)
  1160. $UMULL r8,r6,r7
  1161. $UMULH r9,r6,r7
  1162. addc r11,r11,r8
  1163. adde r12,r12,r9
  1164. addze r10,r0
  1165. #mul_add_c(a[1],b[6],c2,c3,c1);
  1166. $LD r6,`1*$BNSZ`(r4)
  1167. $LD r7,`6*$BNSZ`(r5)
  1168. $UMULL r8,r6,r7
  1169. $UMULH r9,r6,r7
  1170. addc r11,r11,r8
  1171. adde r12,r12,r9
  1172. addze r10,r10
  1173. #mul_add_c(a[2],b[5],c2,c3,c1);
  1174. $LD r6,`2*$BNSZ`(r4)
  1175. $LD r7,`5*$BNSZ`(r5)
  1176. $UMULL r8,r6,r7
  1177. $UMULH r9,r6,r7
  1178. addc r11,r11,r8
  1179. adde r12,r12,r9
  1180. addze r10,r10
  1181. #mul_add_c(a[3],b[4],c2,c3,c1);
  1182. $LD r6,`3*$BNSZ`(r4)
  1183. $LD r7,`4*$BNSZ`(r5)
  1184. $UMULL r8,r6,r7
  1185. $UMULH r9,r6,r7
  1186. addc r11,r11,r8
  1187. adde r12,r12,r9
  1188. addze r10,r10
  1189. #mul_add_c(a[4],b[3],c2,c3,c1);
  1190. $LD r6,`4*$BNSZ`(r4)
  1191. $LD r7,`3*$BNSZ`(r5)
  1192. $UMULL r8,r6,r7
  1193. $UMULH r9,r6,r7
  1194. addc r11,r11,r8
  1195. adde r12,r12,r9
  1196. addze r10,r10
  1197. #mul_add_c(a[5],b[2],c2,c3,c1);
  1198. $LD r6,`5*$BNSZ`(r4)
  1199. $LD r7,`2*$BNSZ`(r5)
  1200. $UMULL r8,r6,r7
  1201. $UMULH r9,r6,r7
  1202. addc r11,r11,r8
  1203. adde r12,r12,r9
  1204. addze r10,r10
  1205. #mul_add_c(a[6],b[1],c2,c3,c1);
  1206. $LD r6,`6*$BNSZ`(r4)
  1207. $LD r7,`1*$BNSZ`(r5)
  1208. $UMULL r8,r6,r7
  1209. $UMULH r9,r6,r7
  1210. addc r11,r11,r8
  1211. adde r12,r12,r9
  1212. addze r10,r10
  1213. #mul_add_c(a[7],b[0],c2,c3,c1);
  1214. $LD r6,`7*$BNSZ`(r4)
  1215. $LD r7,`0*$BNSZ`(r5)
  1216. $UMULL r8,r6,r7
  1217. $UMULH r9,r6,r7
  1218. addc r11,r11,r8
  1219. adde r12,r12,r9
  1220. addze r10,r10
  1221. $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
  1222. #mul_add_c(a[7],b[1],c3,c1,c2);
  1223. $LD r7,`1*$BNSZ`(r5)
  1224. $UMULL r8,r6,r7
  1225. $UMULH r9,r6,r7
  1226. addc r12,r12,r8
  1227. adde r10,r10,r9
  1228. addze r11,r0
  1229. #mul_add_c(a[6],b[2],c3,c1,c2);
  1230. $LD r6,`6*$BNSZ`(r4)
  1231. $LD r7,`2*$BNSZ`(r5)
  1232. $UMULL r8,r6,r7
  1233. $UMULH r9,r6,r7
  1234. addc r12,r12,r8
  1235. adde r10,r10,r9
  1236. addze r11,r11
  1237. #mul_add_c(a[5],b[3],c3,c1,c2);
  1238. $LD r6,`5*$BNSZ`(r4)
  1239. $LD r7,`3*$BNSZ`(r5)
  1240. $UMULL r8,r6,r7
  1241. $UMULH r9,r6,r7
  1242. addc r12,r12,r8
  1243. adde r10,r10,r9
  1244. addze r11,r11
  1245. #mul_add_c(a[4],b[4],c3,c1,c2);
  1246. $LD r6,`4*$BNSZ`(r4)
  1247. $LD r7,`4*$BNSZ`(r5)
  1248. $UMULL r8,r6,r7
  1249. $UMULH r9,r6,r7
  1250. addc r12,r12,r8
  1251. adde r10,r10,r9
  1252. addze r11,r11
  1253. #mul_add_c(a[3],b[5],c3,c1,c2);
  1254. $LD r6,`3*$BNSZ`(r4)
  1255. $LD r7,`5*$BNSZ`(r5)
  1256. $UMULL r8,r6,r7
  1257. $UMULH r9,r6,r7
  1258. addc r12,r12,r8
  1259. adde r10,r10,r9
  1260. addze r11,r11
  1261. #mul_add_c(a[2],b[6],c3,c1,c2);
  1262. $LD r6,`2*$BNSZ`(r4)
  1263. $LD r7,`6*$BNSZ`(r5)
  1264. $UMULL r8,r6,r7
  1265. $UMULH r9,r6,r7
  1266. addc r12,r12,r8
  1267. adde r10,r10,r9
  1268. addze r11,r11
  1269. #mul_add_c(a[1],b[7],c3,c1,c2);
  1270. $LD r6,`1*$BNSZ`(r4)
  1271. $LD r7,`7*$BNSZ`(r5)
  1272. $UMULL r8,r6,r7
  1273. $UMULH r9,r6,r7
  1274. addc r12,r12,r8
  1275. adde r10,r10,r9
  1276. addze r11,r11
  1277. $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
  1278. #mul_add_c(a[2],b[7],c1,c2,c3);
  1279. $LD r6,`2*$BNSZ`(r4)
  1280. $UMULL r8,r6,r7
  1281. $UMULH r9,r6,r7
  1282. addc r10,r10,r8
  1283. adde r11,r11,r9
  1284. addze r12,r0
  1285. #mul_add_c(a[3],b[6],c1,c2,c3);
  1286. $LD r6,`3*$BNSZ`(r4)
  1287. $LD r7,`6*$BNSZ`(r5)
  1288. $UMULL r8,r6,r7
  1289. $UMULH r9,r6,r7
  1290. addc r10,r10,r8
  1291. adde r11,r11,r9
  1292. addze r12,r12
  1293. #mul_add_c(a[4],b[5],c1,c2,c3);
  1294. $LD r6,`4*$BNSZ`(r4)
  1295. $LD r7,`5*$BNSZ`(r5)
  1296. $UMULL r8,r6,r7
  1297. $UMULH r9,r6,r7
  1298. addc r10,r10,r8
  1299. adde r11,r11,r9
  1300. addze r12,r12
  1301. #mul_add_c(a[5],b[4],c1,c2,c3);
  1302. $LD r6,`5*$BNSZ`(r4)
  1303. $LD r7,`4*$BNSZ`(r5)
  1304. $UMULL r8,r6,r7
  1305. $UMULH r9,r6,r7
  1306. addc r10,r10,r8
  1307. adde r11,r11,r9
  1308. addze r12,r12
  1309. #mul_add_c(a[6],b[3],c1,c2,c3);
  1310. $LD r6,`6*$BNSZ`(r4)
  1311. $LD r7,`3*$BNSZ`(r5)
  1312. $UMULL r8,r6,r7
  1313. $UMULH r9,r6,r7
  1314. addc r10,r10,r8
  1315. adde r11,r11,r9
  1316. addze r12,r12
  1317. #mul_add_c(a[7],b[2],c1,c2,c3);
  1318. $LD r6,`7*$BNSZ`(r4)
  1319. $LD r7,`2*$BNSZ`(r5)
  1320. $UMULL r8,r6,r7
  1321. $UMULH r9,r6,r7
  1322. addc r10,r10,r8
  1323. adde r11,r11,r9
  1324. addze r12,r12
  1325. $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
  1326. #mul_add_c(a[7],b[3],c2,c3,c1);
  1327. $LD r7,`3*$BNSZ`(r5)
  1328. $UMULL r8,r6,r7
  1329. $UMULH r9,r6,r7
  1330. addc r11,r11,r8
  1331. adde r12,r12,r9
  1332. addze r10,r0
  1333. #mul_add_c(a[6],b[4],c2,c3,c1);
  1334. $LD r6,`6*$BNSZ`(r4)
  1335. $LD r7,`4*$BNSZ`(r5)
  1336. $UMULL r8,r6,r7
  1337. $UMULH r9,r6,r7
  1338. addc r11,r11,r8
  1339. adde r12,r12,r9
  1340. addze r10,r10
  1341. #mul_add_c(a[5],b[5],c2,c3,c1);
  1342. $LD r6,`5*$BNSZ`(r4)
  1343. $LD r7,`5*$BNSZ`(r5)
  1344. $UMULL r8,r6,r7
  1345. $UMULH r9,r6,r7
  1346. addc r11,r11,r8
  1347. adde r12,r12,r9
  1348. addze r10,r10
  1349. #mul_add_c(a[4],b[6],c2,c3,c1);
  1350. $LD r6,`4*$BNSZ`(r4)
  1351. $LD r7,`6*$BNSZ`(r5)
  1352. $UMULL r8,r6,r7
  1353. $UMULH r9,r6,r7
  1354. addc r11,r11,r8
  1355. adde r12,r12,r9
  1356. addze r10,r10
  1357. #mul_add_c(a[3],b[7],c2,c3,c1);
  1358. $LD r6,`3*$BNSZ`(r4)
  1359. $LD r7,`7*$BNSZ`(r5)
  1360. $UMULL r8,r6,r7
  1361. $UMULH r9,r6,r7
  1362. addc r11,r11,r8
  1363. adde r12,r12,r9
  1364. addze r10,r10
  1365. $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
  1366. #mul_add_c(a[4],b[7],c3,c1,c2);
  1367. $LD r6,`4*$BNSZ`(r4)
  1368. $UMULL r8,r6,r7
  1369. $UMULH r9,r6,r7
  1370. addc r12,r12,r8
  1371. adde r10,r10,r9
  1372. addze r11,r0
  1373. #mul_add_c(a[5],b[6],c3,c1,c2);
  1374. $LD r6,`5*$BNSZ`(r4)
  1375. $LD r7,`6*$BNSZ`(r5)
  1376. $UMULL r8,r6,r7
  1377. $UMULH r9,r6,r7
  1378. addc r12,r12,r8
  1379. adde r10,r10,r9
  1380. addze r11,r11
  1381. #mul_add_c(a[6],b[5],c3,c1,c2);
  1382. $LD r6,`6*$BNSZ`(r4)
  1383. $LD r7,`5*$BNSZ`(r5)
  1384. $UMULL r8,r6,r7
  1385. $UMULH r9,r6,r7
  1386. addc r12,r12,r8
  1387. adde r10,r10,r9
  1388. addze r11,r11
  1389. #mul_add_c(a[7],b[4],c3,c1,c2);
  1390. $LD r6,`7*$BNSZ`(r4)
  1391. $LD r7,`4*$BNSZ`(r5)
  1392. $UMULL r8,r6,r7
  1393. $UMULH r9,r6,r7
  1394. addc r12,r12,r8
  1395. adde r10,r10,r9
  1396. addze r11,r11
  1397. $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
  1398. #mul_add_c(a[7],b[5],c1,c2,c3);
  1399. $LD r7,`5*$BNSZ`(r5)
  1400. $UMULL r8,r6,r7
  1401. $UMULH r9,r6,r7
  1402. addc r10,r10,r8
  1403. adde r11,r11,r9
  1404. addze r12,r0
  1405. #mul_add_c(a[6],b[6],c1,c2,c3);
  1406. $LD r6,`6*$BNSZ`(r4)
  1407. $LD r7,`6*$BNSZ`(r5)
  1408. $UMULL r8,r6,r7
  1409. $UMULH r9,r6,r7
  1410. addc r10,r10,r8
  1411. adde r11,r11,r9
  1412. addze r12,r12
  1413. #mul_add_c(a[5],b[7],c1,c2,c3);
  1414. $LD r6,`5*$BNSZ`(r4)
  1415. $LD r7,`7*$BNSZ`(r5)
  1416. $UMULL r8,r6,r7
  1417. $UMULH r9,r6,r7
  1418. addc r10,r10,r8
  1419. adde r11,r11,r9
  1420. addze r12,r12
  1421. $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
  1422. #mul_add_c(a[6],b[7],c2,c3,c1);
  1423. $LD r6,`6*$BNSZ`(r4)
  1424. $UMULL r8,r6,r7
  1425. $UMULH r9,r6,r7
  1426. addc r11,r11,r8
  1427. adde r12,r12,r9
  1428. addze r10,r0
  1429. #mul_add_c(a[7],b[6],c2,c3,c1);
  1430. $LD r6,`7*$BNSZ`(r4)
  1431. $LD r7,`6*$BNSZ`(r5)
  1432. $UMULL r8,r6,r7
  1433. $UMULH r9,r6,r7
  1434. addc r11,r11,r8
  1435. adde r12,r12,r9
  1436. addze r10,r10
  1437. $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
  1438. #mul_add_c(a[7],b[7],c3,c1,c2);
  1439. $LD r7,`7*$BNSZ`(r5)
  1440. $UMULL r8,r6,r7
  1441. $UMULH r9,r6,r7
  1442. addc r12,r12,r8
  1443. adde r10,r10,r9
  1444. $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
  1445. $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
  1446. blr
  1447. .long 0
  1448. .byte 0,12,0x14,0,0,0,3,0
  1449. .long 0
  1450. .size .bn_mul_comba8,.-.bn_mul_comba8
  1451. #
  1452. # NOTE: The following label name should be changed to
  1453. # "bn_sub_words" i.e. remove the first dot
  1454. # for the gcc compiler. This should be automatically
  1455. # done in the build
  1456. #
  1457. #
  1458. .align 4
  1459. .bn_sub_words:
  1460. #
  1461. # Handcoded version of bn_sub_words
  1462. #
  1463. #BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1464. #
  1465. # r3 = r
  1466. # r4 = a
  1467. # r5 = b
  1468. # r6 = n
  1469. #
  1470. # Note: No loop unrolling done since this is not a performance
  1471. # critical loop.
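#
# For reference, a C sketch of what this routine computes (word-by-word
# subtraction, returning the final borrow). Illustration only; the _ref
# name is ours, not OpenSSL's:
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;     /* 32-bit flavour shown */
#
#   BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
#                             const BN_ULONG *b, int n)
#   {
#       BN_ULONG borrow = 0;
#       for (int i = 0; i < n; i++) {
#           BN_ULONG t = a[i] - b[i] - borrow;
#           /* borrow out if a[i] < b[i], or they are equal and we owed one */
#           borrow = (a[i] < b[i]) || (a[i] == b[i] && borrow);
#           r[i] = t;
#       }
#       return borrow;
#   }
#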
  1472. xor r0,r0,r0 #set r0 = 0
  1473. #
  1474. # check for r6 = 0 AND set carry bit.
  1475. #
  1476. subfc. r7,r0,r6 # If r6 is 0 then result is 0.
  1477. # if r6 > 0 then result !=0
  1478. # In either case carry bit is set.
  1479. beq Lppcasm_sub_adios
  1480. addi r4,r4,-$BNSZ
  1481. addi r3,r3,-$BNSZ
  1482. addi r5,r5,-$BNSZ
  1483. mtctr r6
  1484. Lppcasm_sub_mainloop:
  1485. $LDU r7,$BNSZ(r4)
  1486. $LDU r8,$BNSZ(r5)
  1487. subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
  1488. # if carry = 1 this is r7-r8. Else it
  1489. # is r7-r8 -1 as we need.
  1490. $STU r6,$BNSZ(r3)
  1491. bdnz Lppcasm_sub_mainloop
  1492. Lppcasm_sub_adios:
  1493. subfze r3,r0 # if carry bit is set then r3 = 0 else -1
  1494. andi. r3,r3,1 # keep only last bit.
  1495. blr
  1496. .long 0
  1497. .byte 0,12,0x14,0,0,0,4,0
  1498. .long 0
  1499. .size .bn_sub_words,.-.bn_sub_words
  1500. #
  1501. # NOTE: The following label name should be changed to
  1502. # "bn_add_words" i.e. remove the first dot
  1503. # for the gcc compiler. This should be automatically
  1504. # done in the build
  1505. #
  1506. .align 4
  1507. .bn_add_words:
  1508. #
  1509. # Handcoded version of bn_add_words
  1510. #
  1511. #BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1512. #
  1513. # r3 = r
  1514. # r4 = a
  1515. # r5 = b
  1516. # r6 = n
  1517. #
  1518. # Note: No loop unrolling done since this is not a performance
  1519. # critical loop.
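#
# For reference, a C sketch of the word-by-word addition this routine
# performs, returning the final carry. Illustration only (the _ref name
# is ours):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;     /* 32-bit flavour shown */
#
#   BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
#                             const BN_ULONG *b, int n)
#   {
#       BN_ULONG carry = 0;
#       for (int i = 0; i < n; i++) {
#           BN_ULONG t = a[i] + carry;
#           carry = (t < carry);        /* wrapped while adding the carry */
#           t += b[i];
#           carry |= (t < b[i]);        /* wrapped while adding b[i]      */
#           r[i] = t;
#       }
#       return carry;
#   }
#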
  1520. xor r0,r0,r0
  1521. #
  1522. # check for r6 = 0. Is this needed?
  1523. #
  1524. addic. r6,r6,0 #test r6 and clear carry bit.
  1525. beq Lppcasm_add_adios
  1526. addi r4,r4,-$BNSZ
  1527. addi r3,r3,-$BNSZ
  1528. addi r5,r5,-$BNSZ
  1529. mtctr r6
  1530. Lppcasm_add_mainloop:
  1531. $LDU r7,$BNSZ(r4)
  1532. $LDU r8,$BNSZ(r5)
  1533. adde r8,r7,r8
  1534. $STU r8,$BNSZ(r3)
  1535. bdnz Lppcasm_add_mainloop
  1536. Lppcasm_add_adios:
  1537. addze r3,r0 #return carry bit.
  1538. blr
  1539. .long 0
  1540. .byte 0,12,0x14,0,0,0,4,0
  1541. .long 0
  1542. .size .bn_add_words,.-.bn_add_words
  1543. #
  1544. # NOTE: The following label name should be changed to
  1545. # "bn_div_words" i.e. remove the first dot
  1546. # for the gcc compiler. This should be automatically
  1547. # done in the build
  1548. #
  1549. .align 4
  1550. .bn_div_words:
  1551. #
  1552. # This is a cleaned-up version of code generated by
  1553. # the AIX compiler. The only optimization is to use
  1554. # the PPC instruction to count leading zeros instead
  1555. # of a call to num_bits_word. Since this was compiled
  1556. # only at level -O2, it could probably be squeezed some more.
  1557. #
  1558. # r3 = h
  1559. # r4 = l
  1560. # r5 = d
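#
# What the routine is meant to compute, as a C sketch (32-bit flavour, so
# the double word h:l fits in a uint64_t, and assuming the quotient fits
# in a single BN_ULONG). The assembly below arrives at the same quotient
# without a 64-by-32 divide, using the half-word schoolbook steps
# commented inline. Illustration only:
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
#   {
#       if (d == 0)
#           return (BN_ULONG)-1;        /* matches the early return below */
#       uint64_t n = ((uint64_t)h << 32) | l;
#       return (BN_ULONG)(n / d);       /* quotient of the double word by d */
#   }
#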
  1561. $UCMPI 0,r5,0 # compare r5 and 0
  1562. bne Lppcasm_div1 # proceed if d!=0
  1563. li r3,-1 # d=0 return -1
  1564. blr
  1565. Lppcasm_div1:
  1566. xor r0,r0,r0 #r0=0
  1567. li r8,$BITS
  1568. $CNTLZ. r7,r5 #r7 = num leading 0s in d.
  1569. beq Lppcasm_div2 #proceed if no leading zeros
  1570. subf r8,r7,r8 #r8 = BN_num_bits_word(d)
  1571. $SHR. r9,r3,r8 #are there any bits above r8'th?
  1572. $TR 16,r9,r0 #if there are, signal to dump core...
  1573. Lppcasm_div2:
  1574. $UCMP 0,r3,r5 #h>=d?
  1575. blt Lppcasm_div3 #goto Lppcasm_div3 if not
  1576. subf r3,r5,r3 #h-=d ;
  1577. Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
  1578. cmpi 0,0,r7,0 # is (i == 0)?
  1579. beq Lppcasm_div4
  1580. $SHL r3,r3,r7 # h = (h<< i)
  1581. $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
  1582. $SHL r5,r5,r7 # d<<=i
  1583. or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
  1584. $SHL r4,r4,r7 # l <<=i
  1585. Lppcasm_div4:
  1586. $SHRI r9,r5,`$BITS/2` # r9 = dh
  1587. # dl will be computed when needed
  1588. # as it saves registers.
  1589. li r6,2 #r6=2
  1590. mtctr r6 #counter will be in count.
  1591. Lppcasm_divouterloop:
  1592. $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
  1593. $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
  1594. # compute here for innerloop.
  1595. $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
  1596. bne Lppcasm_div5 # goto Lppcasm_div5 if not
  1597. li r8,-1
  1598. $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
  1599. b Lppcasm_div6
  1600. Lppcasm_div5:
  1601. $UDIV r8,r3,r9 #q = h/dh
  1602. Lppcasm_div6:
  1603. $UMULL r12,r9,r8 #th = q*dh
  1604. $CLRU r10,r5,`$BITS/2` #r10=dl
  1605. $UMULL r6,r8,r10 #tl = q*dl
  1606. Lppcasm_divinnerloop:
  1607. subf r10,r12,r3 #t = h -th
  1608. $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
  1609. addic. r7,r7,0 #test if r7 == 0. used below.
  1610. # now want to compute
  1611. # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
  1612. # the following 2 instructions do that
  1613. $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
  1614. or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
  1615. $UCMP cr1,r6,r7 # compare (tl <= r7)
  1616. bne Lppcasm_divinnerexit
  1617. ble cr1,Lppcasm_divinnerexit
  1618. addi r8,r8,-1 #q--
  1619. subf r12,r9,r12 #th -=dh
  1620. $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
  1621. subf r6,r10,r6 #tl -=dl
  1622. b Lppcasm_divinnerloop
  1623. Lppcasm_divinnerexit:
  1624. $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
  1625. $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
  1626. $UCMP cr1,r4,r11 # compare l and tl
  1627. add r12,r12,r10 # th+=t
  1628. bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
  1629. addi r12,r12,1 # th++
  1630. Lppcasm_div7:
  1631. subf r11,r11,r4 #r11=l-tl
  1632. $UCMP cr1,r3,r12 #compare h and th
  1633. bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
  1634. addi r8,r8,-1 # q--
  1635. add r3,r5,r3 # h+=d
  1636. Lppcasm_div8:
  1637. subf r12,r12,r3 #r12 = h-th
  1638. $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
  1639. # want to compute
  1640. # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
  1641. # the following 2 instructions will do this.
  1642. $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
  1643. $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
  1644. bdz Lppcasm_div9 #if (count==0) break ;
  1645. $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
  1646. b Lppcasm_divouterloop
  1647. Lppcasm_div9:
  1648. or r3,r8,r0
  1649. blr
  1650. .long 0
  1651. .byte 0,12,0x14,0,0,0,3,0
  1652. .long 0
  1653. .size .bn_div_words,.-.bn_div_words
  1654. #
  1655. # NOTE: The following label name should be changed to
  1656. # "bn_sqr_words" i.e. remove the first dot
  1657. # for the gcc compiler. This should be automatically
  1658. # done in the build
  1659. #
  1660. .align 4
  1661. .bn_sqr_words:
  1662. #
  1663. # Optimized version of bn_sqr_words
  1664. #
  1665. # void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
  1666. #
  1667. # r3 = r
  1668. # r4 = a
  1669. # r5 = n
  1670. #
  1671. # r6 = a[i].
  1672. # r7,r8 = product.
  1673. #
  1674. # No unrolling done here. Not performance critical.
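#
# For reference, a C sketch of the per-word squaring done in the loop
# below (32-bit flavour; the _ref name is ours):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   void bn_sqr_words_ref(BN_ULONG *r, const BN_ULONG *a, int n)
#   {
#       for (int i = 0; i < n; i++) {
#           uint64_t p = (uint64_t)a[i] * a[i];
#           r[2*i]     = (BN_ULONG)p;           /* low half  */
#           r[2*i + 1] = (BN_ULONG)(p >> 32);   /* high half */
#       }
#   }
#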
  1675. addic. r5,r5,0 #test r5.
  1676. beq Lppcasm_sqr_adios
  1677. addi r4,r4,-$BNSZ
  1678. addi r3,r3,-$BNSZ
  1679. mtctr r5
  1680. Lppcasm_sqr_mainloop:
  1681. #sqr(r[0],r[1],a[0]);
  1682. $LDU r6,$BNSZ(r4)
  1683. $UMULL r7,r6,r6
  1684. $UMULH r8,r6,r6
  1685. $STU r7,$BNSZ(r3)
  1686. $STU r8,$BNSZ(r3)
  1687. bdnz Lppcasm_sqr_mainloop
  1688. Lppcasm_sqr_adios:
  1689. blr
  1690. .long 0
  1691. .byte 0,12,0x14,0,0,0,3,0
  1692. .long 0
  1693. .size .bn_sqr_words,.-.bn_sqr_words
  1694. #
  1695. # NOTE: The following label name should be changed to
  1696. # "bn_mul_words" i.e. remove the first dot
  1697. # for the gcc compiler. This should be automatically
  1698. # done in the build
  1699. #
  1700. .align 4
  1701. .bn_mul_words:
  1702. #
  1703. # BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
  1704. #
  1705. # r3 = rp
  1706. # r4 = ap
  1707. # r5 = num
  1708. # r6 = w
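#
# For reference, a C sketch of what the unrolled loop below computes
# (32-bit flavour; the _ref name is ours):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
#                             int num, BN_ULONG w)
#   {
#       BN_ULONG carry = 0;
#       for (int i = 0; i < num; i++) {
#           uint64_t t = (uint64_t)ap[i] * w + carry;
#           rp[i] = (BN_ULONG)t;                /* low word stored         */
#           carry = (BN_ULONG)(t >> 32);        /* high word becomes carry */
#       }
#       return carry;
#   }
#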
  1709. xor r0,r0,r0
  1710. xor r12,r12,r12 # used for carry
  1711. rlwinm. r7,r5,30,2,31 # num >> 2
  1712. beq Lppcasm_mw_REM
  1713. mtctr r7
  1714. Lppcasm_mw_LOOP:
  1715. #mul(rp[0],ap[0],w,c1);
  1716. $LD r8,`0*$BNSZ`(r4)
  1717. $UMULL r9,r6,r8
  1718. $UMULH r10,r6,r8
  1719. addc r9,r9,r12
  1720. #addze r10,r10 #carry is NOT ignored.
  1721. #will be taken care of
  1722. #in second spin below
  1723. #using adde.
  1724. $ST r9,`0*$BNSZ`(r3)
  1725. #mul(rp[1],ap[1],w,c1);
  1726. $LD r8,`1*$BNSZ`(r4)
  1727. $UMULL r11,r6,r8
  1728. $UMULH r12,r6,r8
  1729. adde r11,r11,r10
  1730. #addze r12,r12
  1731. $ST r11,`1*$BNSZ`(r3)
  1732. #mul(rp[2],ap[2],w,c1);
  1733. $LD r8,`2*$BNSZ`(r4)
  1734. $UMULL r9,r6,r8
  1735. $UMULH r10,r6,r8
  1736. adde r9,r9,r12
  1737. #addze r10,r10
  1738. $ST r9,`2*$BNSZ`(r3)
  1739. #mul_add(rp[3],ap[3],w,c1);
  1740. $LD r8,`3*$BNSZ`(r4)
  1741. $UMULL r11,r6,r8
  1742. $UMULH r12,r6,r8
  1743. adde r11,r11,r10
  1744. addze r12,r12 #this spin we collect carry into
  1745. #r12
  1746. $ST r11,`3*$BNSZ`(r3)
  1747. addi r3,r3,`4*$BNSZ`
  1748. addi r4,r4,`4*$BNSZ`
  1749. bdnz Lppcasm_mw_LOOP
  1750. Lppcasm_mw_REM:
  1751. andi. r5,r5,0x3
  1752. beq Lppcasm_mw_OVER
  1753. #mul(rp[0],ap[0],w,c1);
  1754. $LD r8,`0*$BNSZ`(r4)
  1755. $UMULL r9,r6,r8
  1756. $UMULH r10,r6,r8
  1757. addc r9,r9,r12
  1758. addze r10,r10
  1759. $ST r9,`0*$BNSZ`(r3)
  1760. addi r12,r10,0
  1761. addi r5,r5,-1
  1762. cmpli 0,0,r5,0
  1763. beq Lppcasm_mw_OVER
  1764. #mul(rp[1],ap[1],w,c1);
  1765. $LD r8,`1*$BNSZ`(r4)
  1766. $UMULL r9,r6,r8
  1767. $UMULH r10,r6,r8
  1768. addc r9,r9,r12
  1769. addze r10,r10
  1770. $ST r9,`1*$BNSZ`(r3)
  1771. addi r12,r10,0
  1772. addi r5,r5,-1
  1773. cmpli 0,0,r5,0
  1774. beq Lppcasm_mw_OVER
  1775. #mul_add(rp[2],ap[2],w,c1);
  1776. $LD r8,`2*$BNSZ`(r4)
  1777. $UMULL r9,r6,r8
  1778. $UMULH r10,r6,r8
  1779. addc r9,r9,r12
  1780. addze r10,r10
  1781. $ST r9,`2*$BNSZ`(r3)
  1782. addi r12,r10,0
  1783. Lppcasm_mw_OVER:
  1784. addi r3,r12,0
  1785. blr
  1786. .long 0
  1787. .byte 0,12,0x14,0,0,0,4,0
  1788. .long 0
  1789. .size .bn_mul_words,.-.bn_mul_words
  1790. #
  1791. # NOTE: The following label name should be changed to
  1792. # "bn_mul_add_words" i.e. remove the first dot
  1793. # for the gcc compiler. This should be automatically
  1794. # done in the build
  1795. #
  1796. .align 4
  1797. .bn_mul_add_words:
  1798. #
  1799. # BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
  1800. #
  1801. # r3 = rp
  1802. # r4 = ap
  1803. # r5 = num
  1804. # r6 = w
  1805. #
  1806. # Empirical evidence suggests that the unrolled version performs best.
  1807. #
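#
# For reference, a C sketch of what the unrolled loop below computes:
# rp[i] accumulates ap[i]*w plus the running carry (32-bit flavour; the
# _ref name is ours):
#
#   #include <stdint.h>
#   typedef uint32_t BN_ULONG;
#
#   BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
#                                 int num, BN_ULONG w)
#   {
#       BN_ULONG carry = 0;
#       for (int i = 0; i < num; i++) {
#           uint64_t t = (uint64_t)ap[i] * w + rp[i] + carry;
#           rp[i] = (BN_ULONG)t;
#           carry = (BN_ULONG)(t >> 32);
#       }
#       return carry;
#   }
#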
  1808. xor r0,r0,r0 #r0 = 0
  1809. xor r12,r12,r12 #r12 = 0 . used for carry
  1810. rlwinm. r7,r5,30,2,31 # num >> 2
  1811. beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover
  1812. mtctr r7
  1813. Lppcasm_maw_mainloop:
  1814. #mul_add(rp[0],ap[0],w,c1);
  1815. $LD r8,`0*$BNSZ`(r4)
  1816. $LD r11,`0*$BNSZ`(r3)
  1817. $UMULL r9,r6,r8
  1818. $UMULH r10,r6,r8
  1819. addc r9,r9,r12 #r12 is carry.
  1820. addze r10,r10
  1821. addc r9,r9,r11
  1822. #addze r10,r10
  1823. #the above instruction addze
  1824. #is NOT needed. Carry will NOT
  1825. #be ignored. It's not affected
  1826. #by multiply and will be collected
  1827. #in the next spin
  1828. $ST r9,`0*$BNSZ`(r3)
  1829. #mul_add(rp[1],ap[1],w,c1);
  1830. $LD r8,`1*$BNSZ`(r4)
  1831. $LD r9,`1*$BNSZ`(r3)
  1832. $UMULL r11,r6,r8
  1833. $UMULH r12,r6,r8
  1834. adde r11,r11,r10 #r10 is carry.
  1835. addze r12,r12
  1836. addc r11,r11,r9
  1837. #addze r12,r12
  1838. $ST r11,`1*$BNSZ`(r3)
  1839. #mul_add(rp[2],ap[2],w,c1);
  1840. $LD r8,`2*$BNSZ`(r4)
  1841. $UMULL r9,r6,r8
  1842. $LD r11,`2*$BNSZ`(r3)
  1843. $UMULH r10,r6,r8
  1844. adde r9,r9,r12
  1845. addze r10,r10
  1846. addc r9,r9,r11
  1847. #addze r10,r10
  1848. $ST r9,`2*$BNSZ`(r3)
  1849. #mul_add(rp[3],ap[3],w,c1);
  1850. $LD r8,`3*$BNSZ`(r4)
  1851. $UMULL r11,r6,r8
  1852. $LD r9,`3*$BNSZ`(r3)
  1853. $UMULH r12,r6,r8
  1854. adde r11,r11,r10
  1855. addze r12,r12
  1856. addc r11,r11,r9
  1857. addze r12,r12
  1858. $ST r11,`3*$BNSZ`(r3)
  1859. addi r3,r3,`4*$BNSZ`
  1860. addi r4,r4,`4*$BNSZ`
  1861. bdnz Lppcasm_maw_mainloop
  1862. Lppcasm_maw_leftover:
  1863. andi. r5,r5,0x3
  1864. beq Lppcasm_maw_adios
  1865. addi r3,r3,-$BNSZ
  1866. addi r4,r4,-$BNSZ
  1867. #mul_add(rp[0],ap[0],w,c1);
  1868. mtctr r5
  1869. $LDU r8,$BNSZ(r4)
  1870. $UMULL r9,r6,r8
  1871. $UMULH r10,r6,r8
  1872. $LDU r11,$BNSZ(r3)
  1873. addc r9,r9,r11
  1874. addze r10,r10
  1875. addc r9,r9,r12
  1876. addze r12,r10
  1877. $ST r9,0(r3)
  1878. bdz Lppcasm_maw_adios
  1879. #mul_add(rp[1],ap[1],w,c1);
  1880. $LDU r8,$BNSZ(r4)
  1881. $UMULL r9,r6,r8
  1882. $UMULH r10,r6,r8
  1883. $LDU r11,$BNSZ(r3)
  1884. addc r9,r9,r11
  1885. addze r10,r10
  1886. addc r9,r9,r12
  1887. addze r12,r10
  1888. $ST r9,0(r3)
  1889. bdz Lppcasm_maw_adios
  1890. #mul_add(rp[2],ap[2],w,c1);
  1891. $LDU r8,$BNSZ(r4)
  1892. $UMULL r9,r6,r8
  1893. $UMULH r10,r6,r8
  1894. $LDU r11,$BNSZ(r3)
  1895. addc r9,r9,r11
  1896. addze r10,r10
  1897. addc r9,r9,r12
  1898. addze r12,r10
  1899. $ST r9,0(r3)
  1900. Lppcasm_maw_adios:
  1901. addi r3,r12,0
  1902. blr
  1903. .long 0
  1904. .byte 0,12,0x14,0,0,0,4,0
  1905. .long 0
  1906. .size .bn_mul_add_words,.-.bn_mul_add_words
  1907. .align 4
  1908. EOF
  1909. $data =~ s/\`([^\`]*)\`/eval $1/gem;
  1910. print $data;
  1911. close STDOUT;