  1. #!/usr/bin/env perl
  2. #
  3. # Implemented as a Perl wrapper as we want to support several different
  4. # architectures with a single file. We pick up the target based on the
  5. # file name we are asked to generate.
  6. #
  7. # It should be noted though that this perl code is nothing like
  8. # <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
  9. # as a pre-processor to cover for platform differences in name decoration,
  10. # linker tables, 32-/64-bit instruction sets...
  11. #
  12. # As you might know there're several PowerPC ABIs in use. Most notably
  13. # Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
  14. # are similar enough to implement leaf(!) functions, which would be ABI
  15. # neutral. And that's what you find here: ABI-neutral leaf functions.
  16. # In case you wonder what that is...
  17. #
  18. # AIX performance
  19. #
  20. # MEASUREMENTS WITH cc ON A 200 MHz PowerPC 604e.
  21. #
  22. # The following is the performance of 32-bit
  23. # compiler-generated code:
  24. #
  25. # OpenSSL 0.9.6c 21 dec 2001
  26. # built on: Tue Jun 11 11:06:51 EDT 2002
  27. # options:bn(64,32) ...
  28. #compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
  29. #                      sign    verify    sign/s  verify/s
  30. #rsa  512 bits      0.0098s   0.0009s     102.0    1170.6
  31. #rsa 1024 bits      0.0507s   0.0026s      19.7     387.5
  32. #rsa 2048 bits      0.3036s   0.0085s       3.3     117.1
  33. #rsa 4096 bits      2.0040s   0.0299s       0.5      33.4
  34. #dsa  512 bits      0.0087s   0.0106s     114.3      94.5
  35. #dsa 1024 bits      0.0256s   0.0313s      39.0      32.0
  36. #
  37. # Same benchmark with this assembler code:
  38. #
  39. #rsa  512 bits      0.0056s   0.0005s     178.6    2049.2
  40. #rsa 1024 bits      0.0283s   0.0015s      35.3     674.1
  41. #rsa 2048 bits      0.1744s   0.0050s       5.7     201.2
  42. #rsa 4096 bits      1.1644s   0.0179s       0.9      55.7
  43. #dsa  512 bits      0.0052s   0.0062s     191.6     162.0
  44. #dsa 1024 bits      0.0149s   0.0180s      67.0      55.5
  45. #
  46. # The number of operations per second increases by almost 75%.
  47. #
  48. # Here are performance numbers for 64-bit
  49. # compiler-generated code:
  50. #
  51. # OpenSSL 0.9.6g [engine] 9 Aug 2002
  52. # built on: Fri Apr 18 16:59:20 EDT 2003
  53. # options:bn(64,64) ...
  54. # compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
  55. #                      sign    verify    sign/s  verify/s
  56. #rsa  512 bits      0.0028s   0.0003s     357.1    3844.4
  57. #rsa 1024 bits      0.0148s   0.0008s      67.5    1239.7
  58. #rsa 2048 bits      0.0963s   0.0028s      10.4     353.0
  59. #rsa 4096 bits      0.6538s   0.0102s       1.5      98.1
  60. #dsa  512 bits      0.0026s   0.0032s     382.5     313.7
  61. #dsa 1024 bits      0.0081s   0.0099s     122.8     100.6
  62. #
  63. # Same benchmark with this assembler code:
  64. #
  65. #rsa  512 bits      0.0020s   0.0002s     510.4    6273.7
  66. #rsa 1024 bits      0.0088s   0.0005s     114.1    2128.3
  67. #rsa 2048 bits      0.0540s   0.0016s      18.5     622.5
  68. #rsa 4096 bits      0.3700s   0.0058s       2.7     171.0
  69. #dsa  512 bits      0.0016s   0.0020s     610.7     507.1
  70. #dsa 1024 bits      0.0047s   0.0058s     212.5     173.2
  71. #
  72. # Again, performance increases by about 75%.
  73. #
  74. # Mac OS X, Apple G5 1.8 GHz (note: this is 32-bit code)
  75. # OpenSSL 0.9.7c 30 Sep 2003
  76. #
  77. # Original code.
  78. #
  79. #rsa  512 bits      0.0011s   0.0001s     906.1   11012.5
  80. #rsa 1024 bits      0.0060s   0.0003s     166.6    3363.1
  81. #rsa 2048 bits      0.0370s   0.0010s      27.1     982.4
  82. #rsa 4096 bits      0.2426s   0.0036s       4.1     280.4
  83. #dsa  512 bits      0.0010s   0.0012s    1038.1     841.5
  84. #dsa 1024 bits      0.0030s   0.0037s     329.6     269.7
  85. #dsa 2048 bits      0.0101s   0.0127s      98.9      78.6
  86. #
  87. # Same benchmark with this assembler code:
  88. #
  89. #rsa  512 bits      0.0007s   0.0001s    1416.2   16645.9
  90. #rsa 1024 bits      0.0036s   0.0002s     274.4    5380.6
  91. #rsa 2048 bits      0.0222s   0.0006s      45.1    1589.5
  92. #rsa 4096 bits      0.1469s   0.0022s       6.8     449.6
  93. #dsa  512 bits      0.0006s   0.0007s    1664.2    1376.2
  94. #dsa 1024 bits      0.0018s   0.0023s     545.0     442.2
  95. #dsa 2048 bits      0.0061s   0.0075s     163.5     132.8
  96. #
  97. # Performance increase of ~60%
  98. #
  99. # If you have comments or suggestions to improve the code, send
  100. # me a note at schari@us.ibm.com
  101. #
  102. $opf = shift;
  103. if ($opf =~ /32\.s/) {
  104. $BITS= 32;
  105. $BNSZ= $BITS/8;
  106. $ISA= "\"ppc\"";
  107. $LD= "lwz"; # load
  108. $LDU= "lwzu"; # load and update
  109. $ST= "stw"; # store
  110. $STU= "stwu"; # store and update
  111. $UMULL= "mullw"; # unsigned multiply low
  112. $UMULH= "mulhwu"; # unsigned multiply high
  113. $UDIV= "divwu"; # unsigned divide
  114. $UCMPI= "cmplwi"; # unsigned compare with immediate
  115. $UCMP= "cmplw"; # unsigned compare
  116. $CNTLZ= "cntlzw"; # count leading zeros
  117. $SHL= "slw"; # shift left
  118. $SHR= "srw"; # unsigned shift right
  119. $SHRI= "srwi"; # unsigned shift right by immediate
  120. $SHLI= "slwi"; # shift left by immediate
  121. $CLRU= "clrlwi"; # clear upper bits
  122. $INSR= "insrwi"; # insert right
  123. $ROTL= "rotlwi"; # rotate left by immediate
  124. $TR= "tw"; # conditional trap
  125. } elsif ($opf =~ /64\.s/) {
  126. $BITS= 64;
  127. $BNSZ= $BITS/8;
  128. $ISA= "\"ppc64\"";
  129. # same as above, but 64-bit mnemonics...
  130. $LD= "ld"; # load
  131. $LDU= "ldu"; # load and update
  132. $ST= "std"; # store
  133. $STU= "stdu"; # store and update
  134. $UMULL= "mulld"; # unsigned multiply low
  135. $UMULH= "mulhdu"; # unsigned multiply high
  136. $UDIV= "divdu"; # unsigned divide
  137. $UCMPI= "cmpldi"; # unsigned compare with immediate
  138. $UCMP= "cmpld"; # unsigned compare
  139. $CNTLZ= "cntlzd"; # count leading zeros
  140. $SHL= "sld"; # shift left
  141. $SHR= "srd"; # unsigned shift right
  142. $SHRI= "srdi"; # unsigned shift right by immediate
  143. $SHLI= "sldi"; # shift left by immediate
  144. $CLRU= "clrldi"; # clear upper bits
  145. $INSR= "insrdi"; # insert right
  146. $ROTL= "rotldi"; # rotate left by immediate
  147. $TR= "td"; # conditional trap
  148. } else { die "nonsense $opf"; }
  149. ( defined shift || open STDOUT,">$opf" ) || die "can't open $opf: $!";
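#
# For example, a few hypothetical invocations (the exact file names
# below are only illustrative; what matters is that the name contains
# "32.s" or "64.s" plus one of "linux", "aix" or "osx"):
#
#	perl ppc.pl linux_ppc32.s	# 32-bit Linux output
#	perl ppc.pl linux_ppc64.s	# 64-bit Linux output
#	perl ppc.pl aix_ppc32.s		# 32-bit AIX output
#	perl ppc.pl osx_ppc32.s		# 32-bit Mac OS X output
#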
  150. # function entry points from the AIX code
  151. #
  152. # There are other, more elegant, ways to handle this. We (IBM) chose
  153. # this approach as it plays well with scripts we run to 'namespace'
  154. # OpenSSL, i.e. we add a prefix to all the public symbols so we can
  155. # co-exist in the same process with other implementations of OpenSSL.
  156. # 'Cleverer' ways of doing these substitutions tend to hide information
  157. # that we need to keep obvious.
  158. #
  159. my @items = ("bn_sqr_comba4",
  160. "bn_sqr_comba8",
  161. "bn_mul_comba4",
  162. "bn_mul_comba8",
  163. "bn_sub_words",
  164. "bn_add_words",
  165. "bn_div_words",
  166. "bn_sqr_words",
  167. "bn_mul_words",
  168. "bn_mul_add_words");
  169. if ($opf =~ /linux/) { do_linux(); }
  170. elsif ($opf =~ /aix/) { do_aix(); }
  171. elsif ($opf =~ /osx/) { do_osx(); }
  172. else { do_bsd(); }
  173. sub do_linux {
  174. $d=&data();
  175. if ($BITS==64) {
  176. foreach $t (@items) {
  177. $d =~ s/\.$t:/\
  178. \t.section\t".opd","aw"\
  179. \t.align\t3\
  180. \t.globl\t$t\
  181. $t:\
  182. \t.quad\t.$t,.TOC.\@tocbase,0\
  183. \t.size\t$t,24\
  184. \t.previous\n\
  185. \t.type\t.$t,\@function\
  186. \t.globl\t.$t\
  187. .$t:/g;
  188. }
  189. }
  190. else {
  191. foreach $t (@items) {
  192. $d=~s/\.$t/$t/g;
  193. }
  194. }
  195. # hide internal labels to avoid pollution of name table...
  196. $d=~s/Lppcasm_/.Lppcasm_/gm;
  197. print $d;
  198. }
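#
# For illustration, on 64-bit Linux the substitution above turns a
# label such as ".bn_add_words:" into a function descriptor in the
# .opd section plus the real code entry point, roughly like this
# (one such entry per routine in @items):
#
#		.section	".opd","aw"
#		.align	3
#		.globl	bn_add_words
#	bn_add_words:
#		.quad	.bn_add_words,.TOC.@tocbase,0
#		.size	bn_add_words,24
#		.previous
#		.type	.bn_add_words,@function
#		.globl	.bn_add_words
#	.bn_add_words:
#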
  199. sub do_aix {
  200. # AIX assembler is smart enough to please the linker without
  201. # making us do something special...
  202. print &data();
  203. }
  204. # MacOSX 32 bit
  205. sub do_osx {
  206. $d=&data();
  207. # Change the bn symbol prefix from '.' to '_'
  208. foreach $t (@items) {
  209. $d=~s/\.$t/_$t/g;
  210. }
  211. # Change .machine to something OS X asm will accept
  212. $d=~s/\.machine.*/.text/g;
  213. $d=~s/\#/;/g; # change comment from '#' to ';'
  214. print $d;
  215. }
  216. # BSD (Untested)
  217. sub do_bsd {
  218. $d=&data();
  219. foreach $t (@items) {
  220. $d=~s/\.$t/_$t/g;
  221. }
  222. print $d;
  223. }
  224. sub data {
  225. local($data)=<<EOF;
  226. #--------------------------------------------------------------------
  227. #
  228. #
  229. #
  230. #
  231. # File: ppc32.s
  232. #
  233. # Created by: Suresh Chari
  234. # IBM Thomas J. Watson Research Library
  235. # Hawthorne, NY
  236. #
  237. #
  238. # Description: Optimized assembly routines for OpenSSL crypto
  239. # on the 32-bit PowerPC platform.
  240. #
  241. #
  242. # Version History
  243. #
  244. # 2. Fixed bn_add,bn_sub and bn_div_words, added comments,
  245. # cleaned up code. Also made a single version which can
  246. # be used for both the AIX and Linux compilers. See NOTE
  247. # below.
  248. # 12/05/03 Suresh Chari
  249. # (with lots of help from) Andy Polyakov
  250. ##
  251. # 1. Initial version 10/20/02 Suresh Chari
  252. #
  253. #
  254. # The following file works with the xlc, cc
  255. # and gcc compilers.
  256. #
  257. # NOTE: To get the file to link correctly with the gcc compiler
  258. # you have to change the names of the routines and remove
  259. # the first .(dot) character. This should automatically
  260. # be done in the build process.
  261. #
  262. # Hand optimized assembly code for the following routines
  263. #
  264. # bn_sqr_comba4
  265. # bn_sqr_comba8
  266. # bn_mul_comba4
  267. # bn_mul_comba8
  268. # bn_sub_words
  269. # bn_add_words
  270. # bn_div_words
  271. # bn_sqr_words
  272. # bn_mul_words
  273. # bn_mul_add_words
  274. #
  275. # NOTE: It is possible to optimize this code more for
  276. # specific PowerPC or Power architectures. On the Northstar
  277. # architecture the optimizations in this file do
  278. # NOT provide much improvement.
  279. #
  280. # If you have comments or suggestions to improve the code, send
  281. # me a note at schari\@us.ibm.com
  282. #
  283. #--------------------------------------------------------------------------
  284. #
  285. # Defines to be used in the assembly code.
  286. #
  287. .set r0,0 # we use it as storage for value of 0
  288. .set SP,1 # preserved
  289. .set RTOC,2 # preserved
  290. .set r3,3 # 1st argument/return value
  291. .set r4,4 # 2nd argument/volatile register
  292. .set r5,5 # 3rd argument/volatile register
  293. .set r6,6 # ...
  294. .set r7,7
  295. .set r8,8
  296. .set r9,9
  297. .set r10,10
  298. .set r11,11
  299. .set r12,12
  300. .set r13,13 # not used, nor any other "below" it...
  301. .set BO_IF_NOT,4
  302. .set BO_IF,12
  303. .set BO_dCTR_NZERO,16
  304. .set BO_dCTR_ZERO,18
  305. .set BO_ALWAYS,20
  306. .set CR0_LT,0;
  307. .set CR0_GT,1;
  308. .set CR0_EQ,2
  309. .set CR1_FX,4;
  310. .set CR1_FEX,5;
  311. .set CR1_VX,6
  312. .set LR,8
  313. # Declare function names to be global
  314. # NOTE: For gcc these names MUST be changed to remove
  315. # the first . i.e. for example change ".bn_sqr_comba4"
  316. # to "bn_sqr_comba4". This should be automatically done
  317. # in the build.
  318. .globl .bn_sqr_comba4
  319. .globl .bn_sqr_comba8
  320. .globl .bn_mul_comba4
  321. .globl .bn_mul_comba8
  322. .globl .bn_sub_words
  323. .globl .bn_add_words
  324. .globl .bn_div_words
  325. .globl .bn_sqr_words
  326. .globl .bn_mul_words
  327. .globl .bn_mul_add_words
  328. # .text section
  329. .machine $ISA
  330. #
  331. # NOTE: The following label name should be changed to
  332. # "bn_sqr_comba4" i.e. remove the first dot
  333. # for the gcc compiler. This should be automatically
  334. # done in the build
  335. #
  336. .align 4
  337. .bn_sqr_comba4:
  338. #
  339. # Optimized version of bn_sqr_comba4.
  340. #
  341. # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
  342. # r3 contains r
  343. # r4 contains a
  344. #
  345. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  346. #
  347. # r5,r6 are the two BN_ULONGs being multiplied.
  348. # r7,r8 are the results of the 32x32 giving 64 bit multiply.
  349. # r9,r10, r11 are the equivalents of c1,c2, c3.
  350. # Here's the assembly
  351. #
  352. #
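#
# For orientation, the unrolled sequence below computes the same thing
# as this illustrative C sketch (32-bit BN_ULONG case; the name and
# helper variables are hypothetical, not OpenSSL's actual macros):
#
#	void bn_sqr_comba4_ref(unsigned int *r, const unsigned int *a)
#	{
#		unsigned long long acc = 0;	/* c1:c2 column accumulator */
#		unsigned int c3 = 0;		/* third accumulator word   */
#		int k, i, n;
#		for (k = 0; k < 7; k++) {	/* one result column at a time */
#			for (i = (k <= 3 ? 0 : k - 3); i <= k - i; i++) {
#				unsigned long long t =
#					(unsigned long long)a[i] * a[k - i];
#				n = (i == k - i) ? 1 : 2;  /* off-diagonal counted twice */
#				while (n--) {
#					acc += t;
#					if (acc < t)	/* 64-bit overflow */
#						c3++;	/* spills into c3 */
#				}
#			}
#			r[k] = (unsigned int)acc;	/* store the column */
#			acc = (acc >> 32) | ((unsigned long long)c3 << 32);
#			c3 = 0;
#		}
#		r[7] = (unsigned int)acc;	/* top word */
#	}
#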
  353. xor r0,r0,r0 # set r0 = 0. Used in the addze
  354. # instructions below
  355. #sqr_add_c(a,0,c1,c2,c3)
  356. $LD r5,`0*$BNSZ`(r4)
  357. $UMULL r9,r5,r5
  358. $UMULH r10,r5,r5 #in first iteration. No need
  359. #to add since c1=c2=c3=0.
  360. # Note c3(r11) is NOT set to 0
  361. # but will be.
  362. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  363. # sqr_add_c2(a,1,0,c2,c3,c1);
  364. $LD r6,`1*$BNSZ`(r4)
  365. $UMULL r7,r5,r6
  366. $UMULH r8,r5,r6
  367. addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
  368. adde r8,r8,r8
  369. addze r9,r0 # catch carry if any.
  370. # r9= r0(=0) and carry
  371. addc r10,r7,r10 # now add to temp result.
  372. addze r11,r8 # r8 added to r11 which is 0
  373. addze r9,r9
  374. $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
  375. #sqr_add_c(a,1,c3,c1,c2)
  376. $UMULL r7,r6,r6
  377. $UMULH r8,r6,r6
  378. addc r11,r7,r11
  379. adde r9,r8,r9
  380. addze r10,r0
  381. #sqr_add_c2(a,2,0,c3,c1,c2)
  382. $LD r6,`2*$BNSZ`(r4)
  383. $UMULL r7,r5,r6
  384. $UMULH r8,r5,r6
  385. addc r7,r7,r7
  386. adde r8,r8,r8
  387. addze r10,r10
  388. addc r11,r7,r11
  389. adde r9,r8,r9
  390. addze r10,r10
  391. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  392. #sqr_add_c2(a,3,0,c1,c2,c3);
  393. $LD r6,`3*$BNSZ`(r4)
  394. $UMULL r7,r5,r6
  395. $UMULH r8,r5,r6
  396. addc r7,r7,r7
  397. adde r8,r8,r8
  398. addze r11,r0
  399. addc r9,r7,r9
  400. adde r10,r8,r10
  401. addze r11,r11
  402. #sqr_add_c2(a,2,1,c1,c2,c3);
  403. $LD r5,`1*$BNSZ`(r4)
  404. $LD r6,`2*$BNSZ`(r4)
  405. $UMULL r7,r5,r6
  406. $UMULH r8,r5,r6
  407. addc r7,r7,r7
  408. adde r8,r8,r8
  409. addze r11,r11
  410. addc r9,r7,r9
  411. adde r10,r8,r10
  412. addze r11,r11
  413. $ST r9,`3*$BNSZ`(r3) #r[3]=c1
  414. #sqr_add_c(a,2,c2,c3,c1);
  415. $UMULL r7,r6,r6
  416. $UMULH r8,r6,r6
  417. addc r10,r7,r10
  418. adde r11,r8,r11
  419. addze r9,r0
  420. #sqr_add_c2(a,3,1,c2,c3,c1);
  421. $LD r6,`3*$BNSZ`(r4)
  422. $UMULL r7,r5,r6
  423. $UMULH r8,r5,r6
  424. addc r7,r7,r7
  425. adde r8,r8,r8
  426. addze r9,r9
  427. addc r10,r7,r10
  428. adde r11,r8,r11
  429. addze r9,r9
  430. $ST r10,`4*$BNSZ`(r3) #r[4]=c2
  431. #sqr_add_c2(a,3,2,c3,c1,c2);
  432. $LD r5,`2*$BNSZ`(r4)
  433. $UMULL r7,r5,r6
  434. $UMULH r8,r5,r6
  435. addc r7,r7,r7
  436. adde r8,r8,r8
  437. addze r10,r0
  438. addc r11,r7,r11
  439. adde r9,r8,r9
  440. addze r10,r10
  441. $ST r11,`5*$BNSZ`(r3) #r[5] = c3
  442. #sqr_add_c(a,3,c1,c2,c3);
  443. $UMULL r7,r6,r6
  444. $UMULH r8,r6,r6
  445. addc r9,r7,r9
  446. adde r10,r8,r10
  447. $ST r9,`6*$BNSZ`(r3) #r[6]=c1
  448. $ST r10,`7*$BNSZ`(r3) #r[7]=c2
  449. bclr BO_ALWAYS,CR0_LT
  450. .long 0x00000000
  451. #
  452. # NOTE: The following label name should be changed to
  453. # "bn_sqr_comba8" i.e. remove the first dot
  454. # for the gcc compiler. This should be automatically
  455. # done in the build
  456. #
  457. .align 4
  458. .bn_sqr_comba8:
  459. #
  460. # This is an optimized version of the bn_sqr_comba8 routine.
  461. # Tightly uses the adde instruction
  462. #
  463. #
  464. # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
  465. # r3 contains r
  466. # r4 contains a
  467. #
  468. # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
  469. #
  470. # r5,r6 are the two BN_ULONGs being multiplied.
  471. # r7,r8 are the results of the 32x32 giving 64 bit multiply.
  472. # r9,r10, r11 are the equivalents of c1,c2, c3.
  473. #
  474. # A possible optimization of loading all 8 longs of a into registers
  475. # doesn't provide any speedup.
  476. #
  477. xor r0,r0,r0 #set r0 = 0.Used in addze
  478. #instructions below.
  479. #sqr_add_c(a,0,c1,c2,c3);
  480. $LD r5,`0*$BNSZ`(r4)
  481. $UMULL r9,r5,r5 #1st iteration: no carries.
  482. $UMULH r10,r5,r5
  483. $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
  484. #sqr_add_c2(a,1,0,c2,c3,c1);
  485. $LD r6,`1*$BNSZ`(r4)
  486. $UMULL r7,r5,r6
  487. $UMULH r8,r5,r6
  488. addc r10,r7,r10 #add the two-register number
  489. adde r11,r8,r0 # (r8,r7) to the three-register
  490. addze r9,r0 # number (r9,r11,r10). NOTE: r0=0
  491. addc r10,r7,r10 #add the two-register number
  492. adde r11,r8,r11 # (r8,r7) to the three-register
  493. addze r9,r9 # number (r9,r11,r10).
  494. $ST r10,`1*$BNSZ`(r3) # r[1]=c2
  495. #sqr_add_c(a,1,c3,c1,c2);
  496. $UMULL r7,r6,r6
  497. $UMULH r8,r6,r6
  498. addc r11,r7,r11
  499. adde r9,r8,r9
  500. addze r10,r0
  501. #sqr_add_c2(a,2,0,c3,c1,c2);
  502. $LD r6,`2*$BNSZ`(r4)
  503. $UMULL r7,r5,r6
  504. $UMULH r8,r5,r6
  505. addc r11,r7,r11
  506. adde r9,r8,r9
  507. addze r10,r10
  508. addc r11,r7,r11
  509. adde r9,r8,r9
  510. addze r10,r10
  511. $ST r11,`2*$BNSZ`(r3) #r[2]=c3
  512. #sqr_add_c2(a,3,0,c1,c2,c3);
  513. $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
  514. $UMULL r7,r5,r6
  515. $UMULH r8,r5,r6
  516. addc r9,r7,r9
  517. adde r10,r8,r10
  518. addze r11,r0
  519. addc r9,r7,r9
  520. adde r10,r8,r10
  521. addze r11,r11
  522. #sqr_add_c2(a,2,1,c1,c2,c3);
  523. $LD r5,`1*$BNSZ`(r4)
  524. $LD r6,`2*$BNSZ`(r4)
  525. $UMULL r7,r5,r6
  526. $UMULH r8,r5,r6
  527. addc r9,r7,r9
  528. adde r10,r8,r10
  529. addze r11,r11
  530. addc r9,r7,r9
  531. adde r10,r8,r10
  532. addze r11,r11
  533. $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
  534. #sqr_add_c(a,2,c2,c3,c1);
  535. $UMULL r7,r6,r6
  536. $UMULH r8,r6,r6
  537. addc r10,r7,r10
  538. adde r11,r8,r11
  539. addze r9,r0
  540. #sqr_add_c2(a,3,1,c2,c3,c1);
  541. $LD r6,`3*$BNSZ`(r4)
  542. $UMULL r7,r5,r6
  543. $UMULH r8,r5,r6
  544. addc r10,r7,r10
  545. adde r11,r8,r11
  546. addze r9,r9
  547. addc r10,r7,r10
  548. adde r11,r8,r11
  549. addze r9,r9
  550. #sqr_add_c2(a,4,0,c2,c3,c1);
  551. $LD r5,`0*$BNSZ`(r4)
  552. $LD r6,`4*$BNSZ`(r4)
  553. $UMULL r7,r5,r6
  554. $UMULH r8,r5,r6
  555. addc r10,r7,r10
  556. adde r11,r8,r11
  557. addze r9,r9
  558. addc r10,r7,r10
  559. adde r11,r8,r11
  560. addze r9,r9
  561. $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
  562. #sqr_add_c2(a,5,0,c3,c1,c2);
  563. $LD r6,`5*$BNSZ`(r4)
  564. $UMULL r7,r5,r6
  565. $UMULH r8,r5,r6
  566. addc r11,r7,r11
  567. adde r9,r8,r9
  568. addze r10,r0
  569. addc r11,r7,r11
  570. adde r9,r8,r9
  571. addze r10,r10
  572. #sqr_add_c2(a,4,1,c3,c1,c2);
  573. $LD r5,`1*$BNSZ`(r4)
  574. $LD r6,`4*$BNSZ`(r4)
  575. $UMULL r7,r5,r6
  576. $UMULH r8,r5,r6
  577. addc r11,r7,r11
  578. adde r9,r8,r9
  579. addze r10,r10
  580. addc r11,r7,r11
  581. adde r9,r8,r9
  582. addze r10,r10
  583. #sqr_add_c2(a,3,2,c3,c1,c2);
  584. $LD r5,`2*$BNSZ`(r4)
  585. $LD r6,`3*$BNSZ`(r4)
  586. $UMULL r7,r5,r6
  587. $UMULH r8,r5,r6
  588. addc r11,r7,r11
  589. adde r9,r8,r9
  590. addze r10,r10
  591. addc r11,r7,r11
  592. adde r9,r8,r9
  593. addze r10,r10
  594. $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
  595. #sqr_add_c(a,3,c1,c2,c3);
  596. $UMULL r7,r6,r6
  597. $UMULH r8,r6,r6
  598. addc r9,r7,r9
  599. adde r10,r8,r10
  600. addze r11,r0
  601. #sqr_add_c2(a,4,2,c1,c2,c3);
  602. $LD r6,`4*$BNSZ`(r4)
  603. $UMULL r7,r5,r6
  604. $UMULH r8,r5,r6
  605. addc r9,r7,r9
  606. adde r10,r8,r10
  607. addze r11,r11
  608. addc r9,r7,r9
  609. adde r10,r8,r10
  610. addze r11,r11
  611. #sqr_add_c2(a,5,1,c1,c2,c3);
  612. $LD r5,`1*$BNSZ`(r4)
  613. $LD r6,`5*$BNSZ`(r4)
  614. $UMULL r7,r5,r6
  615. $UMULH r8,r5,r6
  616. addc r9,r7,r9
  617. adde r10,r8,r10
  618. addze r11,r11
  619. addc r9,r7,r9
  620. adde r10,r8,r10
  621. addze r11,r11
  622. #sqr_add_c2(a,6,0,c1,c2,c3);
  623. $LD r5,`0*$BNSZ`(r4)
  624. $LD r6,`6*$BNSZ`(r4)
  625. $UMULL r7,r5,r6
  626. $UMULH r8,r5,r6
  627. addc r9,r7,r9
  628. adde r10,r8,r10
  629. addze r11,r11
  630. addc r9,r7,r9
  631. adde r10,r8,r10
  632. addze r11,r11
  633. $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
  634. #sqr_add_c2(a,7,0,c2,c3,c1);
  635. $LD r6,`7*$BNSZ`(r4)
  636. $UMULL r7,r5,r6
  637. $UMULH r8,r5,r6
  638. addc r10,r7,r10
  639. adde r11,r8,r11
  640. addze r9,r0
  641. addc r10,r7,r10
  642. adde r11,r8,r11
  643. addze r9,r9
  644. #sqr_add_c2(a,6,1,c2,c3,c1);
  645. $LD r5,`1*$BNSZ`(r4)
  646. $LD r6,`6*$BNSZ`(r4)
  647. $UMULL r7,r5,r6
  648. $UMULH r8,r5,r6
  649. addc r10,r7,r10
  650. adde r11,r8,r11
  651. addze r9,r9
  652. addc r10,r7,r10
  653. adde r11,r8,r11
  654. addze r9,r9
  655. #sqr_add_c2(a,5,2,c2,c3,c1);
  656. $LD r5,`2*$BNSZ`(r4)
  657. $LD r6,`5*$BNSZ`(r4)
  658. $UMULL r7,r5,r6
  659. $UMULH r8,r5,r6
  660. addc r10,r7,r10
  661. adde r11,r8,r11
  662. addze r9,r9
  663. addc r10,r7,r10
  664. adde r11,r8,r11
  665. addze r9,r9
  666. #sqr_add_c2(a,4,3,c2,c3,c1);
  667. $LD r5,`3*$BNSZ`(r4)
  668. $LD r6,`4*$BNSZ`(r4)
  669. $UMULL r7,r5,r6
  670. $UMULH r8,r5,r6
  671. addc r10,r7,r10
  672. adde r11,r8,r11
  673. addze r9,r9
  674. addc r10,r7,r10
  675. adde r11,r8,r11
  676. addze r9,r9
  677. $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
  678. #sqr_add_c(a,4,c3,c1,c2);
  679. $UMULL r7,r6,r6
  680. $UMULH r8,r6,r6
  681. addc r11,r7,r11
  682. adde r9,r8,r9
  683. addze r10,r0
  684. #sqr_add_c2(a,5,3,c3,c1,c2);
  685. $LD r6,`5*$BNSZ`(r4)
  686. $UMULL r7,r5,r6
  687. $UMULH r8,r5,r6
  688. addc r11,r7,r11
  689. adde r9,r8,r9
  690. addze r10,r10
  691. addc r11,r7,r11
  692. adde r9,r8,r9
  693. addze r10,r10
  694. #sqr_add_c2(a,6,2,c3,c1,c2);
  695. $LD r5,`2*$BNSZ`(r4)
  696. $LD r6,`6*$BNSZ`(r4)
  697. $UMULL r7,r5,r6
  698. $UMULH r8,r5,r6
  699. addc r11,r7,r11
  700. adde r9,r8,r9
  701. addze r10,r10
  702. addc r11,r7,r11
  703. adde r9,r8,r9
  704. addze r10,r10
  705. #sqr_add_c2(a,7,1,c3,c1,c2);
  706. $LD r5,`1*$BNSZ`(r4)
  707. $LD r6,`7*$BNSZ`(r4)
  708. $UMULL r7,r5,r6
  709. $UMULH r8,r5,r6
  710. addc r11,r7,r11
  711. adde r9,r8,r9
  712. addze r10,r10
  713. addc r11,r7,r11
  714. adde r9,r8,r9
  715. addze r10,r10
  716. $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
  717. #sqr_add_c2(a,7,2,c1,c2,c3);
  718. $LD r5,`2*$BNSZ`(r4)
  719. $UMULL r7,r5,r6
  720. $UMULH r8,r5,r6
  721. addc r9,r7,r9
  722. adde r10,r8,r10
  723. addze r11,r0
  724. addc r9,r7,r9
  725. adde r10,r8,r10
  726. addze r11,r11
  727. #sqr_add_c2(a,6,3,c1,c2,c3);
  728. $LD r5,`3*$BNSZ`(r4)
  729. $LD r6,`6*$BNSZ`(r4)
  730. $UMULL r7,r5,r6
  731. $UMULH r8,r5,r6
  732. addc r9,r7,r9
  733. adde r10,r8,r10
  734. addze r11,r11
  735. addc r9,r7,r9
  736. adde r10,r8,r10
  737. addze r11,r11
  738. #sqr_add_c2(a,5,4,c1,c2,c3);
  739. $LD r5,`4*$BNSZ`(r4)
  740. $LD r6,`5*$BNSZ`(r4)
  741. $UMULL r7,r5,r6
  742. $UMULH r8,r5,r6
  743. addc r9,r7,r9
  744. adde r10,r8,r10
  745. addze r11,r11
  746. addc r9,r7,r9
  747. adde r10,r8,r10
  748. addze r11,r11
  749. $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
  750. #sqr_add_c(a,5,c2,c3,c1);
  751. $UMULL r7,r6,r6
  752. $UMULH r8,r6,r6
  753. addc r10,r7,r10
  754. adde r11,r8,r11
  755. addze r9,r0
  756. #sqr_add_c2(a,6,4,c2,c3,c1);
  757. $LD r6,`6*$BNSZ`(r4)
  758. $UMULL r7,r5,r6
  759. $UMULH r8,r5,r6
  760. addc r10,r7,r10
  761. adde r11,r8,r11
  762. addze r9,r9
  763. addc r10,r7,r10
  764. adde r11,r8,r11
  765. addze r9,r9
  766. #sqr_add_c2(a,7,3,c2,c3,c1);
  767. $LD r5,`3*$BNSZ`(r4)
  768. $LD r6,`7*$BNSZ`(r4)
  769. $UMULL r7,r5,r6
  770. $UMULH r8,r5,r6
  771. addc r10,r7,r10
  772. adde r11,r8,r11
  773. addze r9,r9
  774. addc r10,r7,r10
  775. adde r11,r8,r11
  776. addze r9,r9
  777. $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
  778. #sqr_add_c2(a,7,4,c3,c1,c2);
  779. $LD r5,`4*$BNSZ`(r4)
  780. $UMULL r7,r5,r6
  781. $UMULH r8,r5,r6
  782. addc r11,r7,r11
  783. adde r9,r8,r9
  784. addze r10,r0
  785. addc r11,r7,r11
  786. adde r9,r8,r9
  787. addze r10,r10
  788. #sqr_add_c2(a,6,5,c3,c1,c2);
  789. $LD r5,`5*$BNSZ`(r4)
  790. $LD r6,`6*$BNSZ`(r4)
  791. $UMULL r7,r5,r6
  792. $UMULH r8,r5,r6
  793. addc r11,r7,r11
  794. adde r9,r8,r9
  795. addze r10,r10
  796. addc r11,r7,r11
  797. adde r9,r8,r9
  798. addze r10,r10
  799. $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
  800. #sqr_add_c(a,6,c1,c2,c3);
  801. $UMULL r7,r6,r6
  802. $UMULH r8,r6,r6
  803. addc r9,r7,r9
  804. adde r10,r8,r10
  805. addze r11,r0
  806. #sqr_add_c2(a,7,5,c1,c2,c3)
  807. $LD r6,`7*$BNSZ`(r4)
  808. $UMULL r7,r5,r6
  809. $UMULH r8,r5,r6
  810. addc r9,r7,r9
  811. adde r10,r8,r10
  812. addze r11,r11
  813. addc r9,r7,r9
  814. adde r10,r8,r10
  815. addze r11,r11
  816. $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
  817. #sqr_add_c2(a,7,6,c2,c3,c1)
  818. $LD r5,`6*$BNSZ`(r4)
  819. $UMULL r7,r5,r6
  820. $UMULH r8,r5,r6
  821. addc r10,r7,r10
  822. adde r11,r8,r11
  823. addze r9,r0
  824. addc r10,r7,r10
  825. adde r11,r8,r11
  826. addze r9,r9
  827. $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
  828. #sqr_add_c(a,7,c3,c1,c2);
  829. $UMULL r7,r6,r6
  830. $UMULH r8,r6,r6
  831. addc r11,r7,r11
  832. adde r9,r8,r9
  833. $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
  834. $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
  835. bclr BO_ALWAYS,CR0_LT
  836. .long 0x00000000
  837. #
  838. # NOTE: The following label name should be changed to
  839. # "bn_mul_comba4" i.e. remove the first dot
  840. # for the gcc compiler. This should be automatically
  841. # done in the build
  842. #
  843. .align 4
  844. .bn_mul_comba4:
  845. #
  846. # This is an optimized version of the bn_mul_comba4 routine.
  847. #
  848. # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  849. # r3 contains r
  850. # r4 contains a
  851. # r5 contains b
  852. # r6, r7 are the 2 BN_ULONGs being multiplied.
  853. # r8, r9 are the results of the 32x32 giving 64 multiply.
  854. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  855. #
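#
# For orientation, an illustrative C equivalent of the unrolled code
# below (32-bit BN_ULONG case; the name is hypothetical, this is not
# OpenSSL's actual macro expansion):
#
#	void bn_mul_comba4_ref(unsigned int *r, const unsigned int *a,
#				const unsigned int *b)
#	{
#		unsigned long long acc = 0;	/* c1:c2 column accumulator */
#		unsigned int c3 = 0;		/* third accumulator word   */
#		int k, i;
#		for (k = 0; k < 7; k++) {
#			for (i = (k <= 3 ? 0 : k - 3); i <= 3 && i <= k; i++) {
#				unsigned long long t =
#					(unsigned long long)a[i] * b[k - i];
#				acc += t;
#				if (acc < t)		/* 64-bit overflow */
#					c3++;
#			}
#			r[k] = (unsigned int)acc;	/* store the column */
#			acc = (acc >> 32) | ((unsigned long long)c3 << 32);
#			c3 = 0;
#		}
#		r[7] = (unsigned int)acc;
#	}
#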
  856. xor r0,r0,r0 #r0=0. Used in addze below.
  857. #mul_add_c(a[0],b[0],c1,c2,c3);
  858. $LD r6,`0*$BNSZ`(r4)
  859. $LD r7,`0*$BNSZ`(r5)
  860. $UMULL r10,r6,r7
  861. $UMULH r11,r6,r7
  862. $ST r10,`0*$BNSZ`(r3) #r[0]=c1
  863. #mul_add_c(a[0],b[1],c2,c3,c1);
  864. $LD r7,`1*$BNSZ`(r5)
  865. $UMULL r8,r6,r7
  866. $UMULH r9,r6,r7
  867. addc r11,r8,r11
  868. adde r12,r9,r0
  869. addze r10,r0
  870. #mul_add_c(a[1],b[0],c2,c3,c1);
  871. $LD r6, `1*$BNSZ`(r4)
  872. $LD r7, `0*$BNSZ`(r5)
  873. $UMULL r8,r6,r7
  874. $UMULH r9,r6,r7
  875. addc r11,r8,r11
  876. adde r12,r9,r12
  877. addze r10,r10
  878. $ST r11,`1*$BNSZ`(r3) #r[1]=c2
  879. #mul_add_c(a[2],b[0],c3,c1,c2);
  880. $LD r6,`2*$BNSZ`(r4)
  881. $UMULL r8,r6,r7
  882. $UMULH r9,r6,r7
  883. addc r12,r8,r12
  884. adde r10,r9,r10
  885. addze r11,r0
  886. #mul_add_c(a[1],b[1],c3,c1,c2);
  887. $LD r6,`1*$BNSZ`(r4)
  888. $LD r7,`1*$BNSZ`(r5)
  889. $UMULL r8,r6,r7
  890. $UMULH r9,r6,r7
  891. addc r12,r8,r12
  892. adde r10,r9,r10
  893. addze r11,r11
  894. #mul_add_c(a[0],b[2],c3,c1,c2);
  895. $LD r6,`0*$BNSZ`(r4)
  896. $LD r7,`2*$BNSZ`(r5)
  897. $UMULL r8,r6,r7
  898. $UMULH r9,r6,r7
  899. addc r12,r8,r12
  900. adde r10,r9,r10
  901. addze r11,r11
  902. $ST r12,`2*$BNSZ`(r3) #r[2]=c3
  903. #mul_add_c(a[0],b[3],c1,c2,c3);
  904. $LD r7,`3*$BNSZ`(r5)
  905. $UMULL r8,r6,r7
  906. $UMULH r9,r6,r7
  907. addc r10,r8,r10
  908. adde r11,r9,r11
  909. addze r12,r0
  910. #mul_add_c(a[1],b[2],c1,c2,c3);
  911. $LD r6,`1*$BNSZ`(r4)
  912. $LD r7,`2*$BNSZ`(r5)
  913. $UMULL r8,r6,r7
  914. $UMULH r9,r6,r7
  915. addc r10,r8,r10
  916. adde r11,r9,r11
  917. addze r12,r12
  918. #mul_add_c(a[2],b[1],c1,c2,c3);
  919. $LD r6,`2*$BNSZ`(r4)
  920. $LD r7,`1*$BNSZ`(r5)
  921. $UMULL r8,r6,r7
  922. $UMULH r9,r6,r7
  923. addc r10,r8,r10
  924. adde r11,r9,r11
  925. addze r12,r12
  926. #mul_add_c(a[3],b[0],c1,c2,c3);
  927. $LD r6,`3*$BNSZ`(r4)
  928. $LD r7,`0*$BNSZ`(r5)
  929. $UMULL r8,r6,r7
  930. $UMULH r9,r6,r7
  931. addc r10,r8,r10
  932. adde r11,r9,r11
  933. addze r12,r12
  934. $ST r10,`3*$BNSZ`(r3) #r[3]=c1
  935. #mul_add_c(a[3],b[1],c2,c3,c1);
  936. $LD r7,`1*$BNSZ`(r5)
  937. $UMULL r8,r6,r7
  938. $UMULH r9,r6,r7
  939. addc r11,r8,r11
  940. adde r12,r9,r12
  941. addze r10,r0
  942. #mul_add_c(a[2],b[2],c2,c3,c1);
  943. $LD r6,`2*$BNSZ`(r4)
  944. $LD r7,`2*$BNSZ`(r5)
  945. $UMULL r8,r6,r7
  946. $UMULH r9,r6,r7
  947. addc r11,r8,r11
  948. adde r12,r9,r12
  949. addze r10,r10
  950. #mul_add_c(a[1],b[3],c2,c3,c1);
  951. $LD r6,`1*$BNSZ`(r4)
  952. $LD r7,`3*$BNSZ`(r5)
  953. $UMULL r8,r6,r7
  954. $UMULH r9,r6,r7
  955. addc r11,r8,r11
  956. adde r12,r9,r12
  957. addze r10,r10
  958. $ST r11,`4*$BNSZ`(r3) #r[4]=c2
  959. #mul_add_c(a[2],b[3],c3,c1,c2);
  960. $LD r6,`2*$BNSZ`(r4)
  961. $UMULL r8,r6,r7
  962. $UMULH r9,r6,r7
  963. addc r12,r8,r12
  964. adde r10,r9,r10
  965. addze r11,r0
  966. #mul_add_c(a[3],b[2],c3,c1,c2);
  967. $LD r6,`3*$BNSZ`(r4)
  968. $LD r7,`2*$BNSZ`(r5) #b[2] (b is addressed via r5)
  969. $UMULL r8,r6,r7
  970. $UMULH r9,r6,r7
  971. addc r12,r8,r12
  972. adde r10,r9,r10
  973. addze r11,r11
  974. $ST r12,`5*$BNSZ`(r3) #r[5]=c3
  975. #mul_add_c(a[3],b[3],c1,c2,c3);
  976. $LD r7,`3*$BNSZ`(r5)
  977. $UMULL r8,r6,r7
  978. $UMULH r9,r6,r7
  979. addc r10,r8,r10
  980. adde r11,r9,r11
  981. $ST r10,`6*$BNSZ`(r3) #r[6]=c1
  982. $ST r11,`7*$BNSZ`(r3) #r[7]=c2
  983. bclr BO_ALWAYS,CR0_LT
  984. .long 0x00000000
  985. #
  986. # NOTE: The following label name should be changed to
  987. # "bn_mul_comba8" i.e. remove the first dot
  988. # for the gcc compiler. This should be automatically
  989. # done in the build
  990. #
  991. .align 4
  992. .bn_mul_comba8:
  993. #
  994. # Optimized version of the bn_mul_comba8 routine.
  995. #
  996. # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  997. # r3 contains r
  998. # r4 contains a
  999. # r5 contains b
  1000. # r6, r7 are the 2 BN_ULONGs being multiplied.
  1001. # r8, r9 are the results of the 32x32 giving 64 multiply.
  1002. # r10, r11, r12 are the equivalents of c1, c2, and c3.
  1003. #
  1004. xor r0,r0,r0 #r0=0. Used in addze below.
  1005. #mul_add_c(a[0],b[0],c1,c2,c3);
  1006. $LD r6,`0*$BNSZ`(r4) #a[0]
  1007. $LD r7,`0*$BNSZ`(r5) #b[0]
  1008. $UMULL r10,r6,r7
  1009. $UMULH r11,r6,r7
  1010. $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
  1011. #mul_add_c(a[0],b[1],c2,c3,c1);
  1012. $LD r7,`1*$BNSZ`(r5)
  1013. $UMULL r8,r6,r7
  1014. $UMULH r9,r6,r7
  1015. addc r11,r11,r8
  1016. addze r12,r9 # since we didn't set r12 to zero before.
  1017. addze r10,r0
  1018. #mul_add_c(a[1],b[0],c2,c3,c1);
  1019. $LD r6,`1*$BNSZ`(r4)
  1020. $LD r7,`0*$BNSZ`(r5)
  1021. $UMULL r8,r6,r7
  1022. $UMULH r9,r6,r7
  1023. addc r11,r11,r8
  1024. adde r12,r12,r9
  1025. addze r10,r10
  1026. $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
  1027. #mul_add_c(a[2],b[0],c3,c1,c2);
  1028. $LD r6,`2*$BNSZ`(r4)
  1029. $UMULL r8,r6,r7
  1030. $UMULH r9,r6,r7
  1031. addc r12,r12,r8
  1032. adde r10,r10,r9
  1033. addze r11,r0
  1034. #mul_add_c(a[1],b[1],c3,c1,c2);
  1035. $LD r6,`1*$BNSZ`(r4)
  1036. $LD r7,`1*$BNSZ`(r5)
  1037. $UMULL r8,r6,r7
  1038. $UMULH r9,r6,r7
  1039. addc r12,r12,r8
  1040. adde r10,r10,r9
  1041. addze r11,r11
  1042. #mul_add_c(a[0],b[2],c3,c1,c2);
  1043. $LD r6,`0*$BNSZ`(r4)
  1044. $LD r7,`2*$BNSZ`(r5)
  1045. $UMULL r8,r6,r7
  1046. $UMULH r9,r6,r7
  1047. addc r12,r12,r8
  1048. adde r10,r10,r9
  1049. addze r11,r11
  1050. $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
  1051. #mul_add_c(a[0],b[3],c1,c2,c3);
  1052. $LD r7,`3*$BNSZ`(r5)
  1053. $UMULL r8,r6,r7
  1054. $UMULH r9,r6,r7
  1055. addc r10,r10,r8
  1056. adde r11,r11,r9
  1057. addze r12,r0
  1058. #mul_add_c(a[1],b[2],c1,c2,c3);
  1059. $LD r6,`1*$BNSZ`(r4)
  1060. $LD r7,`2*$BNSZ`(r5)
  1061. $UMULL r8,r6,r7
  1062. $UMULH r9,r6,r7
  1063. addc r10,r10,r8
  1064. adde r11,r11,r9
  1065. addze r12,r12
  1066. #mul_add_c(a[2],b[1],c1,c2,c3);
  1067. $LD r6,`2*$BNSZ`(r4)
  1068. $LD r7,`1*$BNSZ`(r5)
  1069. $UMULL r8,r6,r7
  1070. $UMULH r9,r6,r7
  1071. addc r10,r10,r8
  1072. adde r11,r11,r9
  1073. addze r12,r12
  1074. #mul_add_c(a[3],b[0],c1,c2,c3);
  1075. $LD r6,`3*$BNSZ`(r4)
  1076. $LD r7,`0*$BNSZ`(r5)
  1077. $UMULL r8,r6,r7
  1078. $UMULH r9,r6,r7
  1079. addc r10,r10,r8
  1080. adde r11,r11,r9
  1081. addze r12,r12
  1082. $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
  1083. #mul_add_c(a[4],b[0],c2,c3,c1);
  1084. $LD r6,`4*$BNSZ`(r4)
  1085. $UMULL r8,r6,r7
  1086. $UMULH r9,r6,r7
  1087. addc r11,r11,r8
  1088. adde r12,r12,r9
  1089. addze r10,r0
  1090. #mul_add_c(a[3],b[1],c2,c3,c1);
  1091. $LD r6,`3*$BNSZ`(r4)
  1092. $LD r7,`1*$BNSZ`(r5)
  1093. $UMULL r8,r6,r7
  1094. $UMULH r9,r6,r7
  1095. addc r11,r11,r8
  1096. adde r12,r12,r9
  1097. addze r10,r10
  1098. #mul_add_c(a[2],b[2],c2,c3,c1);
  1099. $LD r6,`2*$BNSZ`(r4)
  1100. $LD r7,`2*$BNSZ`(r5)
  1101. $UMULL r8,r6,r7
  1102. $UMULH r9,r6,r7
  1103. addc r11,r11,r8
  1104. adde r12,r12,r9
  1105. addze r10,r10
  1106. #mul_add_c(a[1],b[3],c2,c3,c1);
  1107. $LD r6,`1*$BNSZ`(r4)
  1108. $LD r7,`3*$BNSZ`(r5)
  1109. $UMULL r8,r6,r7
  1110. $UMULH r9,r6,r7
  1111. addc r11,r11,r8
  1112. adde r12,r12,r9
  1113. addze r10,r10
  1114. #mul_add_c(a[0],b[4],c2,c3,c1);
  1115. $LD r6,`0*$BNSZ`(r4)
  1116. $LD r7,`4*$BNSZ`(r5)
  1117. $UMULL r8,r6,r7
  1118. $UMULH r9,r6,r7
  1119. addc r11,r11,r8
  1120. adde r12,r12,r9
  1121. addze r10,r10
  1122. $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
  1123. #mul_add_c(a[0],b[5],c3,c1,c2);
  1124. $LD r7,`5*$BNSZ`(r5)
  1125. $UMULL r8,r6,r7
  1126. $UMULH r9,r6,r7
  1127. addc r12,r12,r8
  1128. adde r10,r10,r9
  1129. addze r11,r0
  1130. #mul_add_c(a[1],b[4],c3,c1,c2);
  1131. $LD r6,`1*$BNSZ`(r4)
  1132. $LD r7,`4*$BNSZ`(r5)
  1133. $UMULL r8,r6,r7
  1134. $UMULH r9,r6,r7
  1135. addc r12,r12,r8
  1136. adde r10,r10,r9
  1137. addze r11,r11
  1138. #mul_add_c(a[2],b[3],c3,c1,c2);
  1139. $LD r6,`2*$BNSZ`(r4)
  1140. $LD r7,`3*$BNSZ`(r5)
  1141. $UMULL r8,r6,r7
  1142. $UMULH r9,r6,r7
  1143. addc r12,r12,r8
  1144. adde r10,r10,r9
  1145. addze r11,r11
  1146. #mul_add_c(a[3],b[2],c3,c1,c2);
  1147. $LD r6,`3*$BNSZ`(r4)
  1148. $LD r7,`2*$BNSZ`(r5)
  1149. $UMULL r8,r6,r7
  1150. $UMULH r9,r6,r7
  1151. addc r12,r12,r8
  1152. adde r10,r10,r9
  1153. addze r11,r11
  1154. #mul_add_c(a[4],b[1],c3,c1,c2);
  1155. $LD r6,`4*$BNSZ`(r4)
  1156. $LD r7,`1*$BNSZ`(r5)
  1157. $UMULL r8,r6,r7
  1158. $UMULH r9,r6,r7
  1159. addc r12,r12,r8
  1160. adde r10,r10,r9
  1161. addze r11,r11
  1162. #mul_add_c(a[5],b[0],c3,c1,c2);
  1163. $LD r6,`5*$BNSZ`(r4)
  1164. $LD r7,`0*$BNSZ`(r5)
  1165. $UMULL r8,r6,r7
  1166. $UMULH r9,r6,r7
  1167. addc r12,r12,r8
  1168. adde r10,r10,r9
  1169. addze r11,r11
  1170. $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
  1171. #mul_add_c(a[6],b[0],c1,c2,c3);
  1172. $LD r6,`6*$BNSZ`(r4)
  1173. $UMULL r8,r6,r7
  1174. $UMULH r9,r6,r7
  1175. addc r10,r10,r8
  1176. adde r11,r11,r9
  1177. addze r12,r0
  1178. #mul_add_c(a[5],b[1],c1,c2,c3);
  1179. $LD r6,`5*$BNSZ`(r4)
  1180. $LD r7,`1*$BNSZ`(r5)
  1181. $UMULL r8,r6,r7
  1182. $UMULH r9,r6,r7
  1183. addc r10,r10,r8
  1184. adde r11,r11,r9
  1185. addze r12,r12
  1186. #mul_add_c(a[4],b[2],c1,c2,c3);
  1187. $LD r6,`4*$BNSZ`(r4)
  1188. $LD r7,`2*$BNSZ`(r5)
  1189. $UMULL r8,r6,r7
  1190. $UMULH r9,r6,r7
  1191. addc r10,r10,r8
  1192. adde r11,r11,r9
  1193. addze r12,r12
  1194. #mul_add_c(a[3],b[3],c1,c2,c3);
  1195. $LD r6,`3*$BNSZ`(r4)
  1196. $LD r7,`3*$BNSZ`(r5)
  1197. $UMULL r8,r6,r7
  1198. $UMULH r9,r6,r7
  1199. addc r10,r10,r8
  1200. adde r11,r11,r9
  1201. addze r12,r12
  1202. #mul_add_c(a[2],b[4],c1,c2,c3);
  1203. $LD r6,`2*$BNSZ`(r4)
  1204. $LD r7,`4*$BNSZ`(r5)
  1205. $UMULL r8,r6,r7
  1206. $UMULH r9,r6,r7
  1207. addc r10,r10,r8
  1208. adde r11,r11,r9
  1209. addze r12,r12
  1210. #mul_add_c(a[1],b[5],c1,c2,c3);
  1211. $LD r6,`1*$BNSZ`(r4)
  1212. $LD r7,`5*$BNSZ`(r5)
  1213. $UMULL r8,r6,r7
  1214. $UMULH r9,r6,r7
  1215. addc r10,r10,r8
  1216. adde r11,r11,r9
  1217. addze r12,r12
  1218. #mul_add_c(a[0],b[6],c1,c2,c3);
  1219. $LD r6,`0*$BNSZ`(r4)
  1220. $LD r7,`6*$BNSZ`(r5)
  1221. $UMULL r8,r6,r7
  1222. $UMULH r9,r6,r7
  1223. addc r10,r10,r8
  1224. adde r11,r11,r9
  1225. addze r12,r12
  1226. $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
  1227. #mul_add_c(a[0],b[7],c2,c3,c1);
  1228. $LD r7,`7*$BNSZ`(r5)
  1229. $UMULL r8,r6,r7
  1230. $UMULH r9,r6,r7
  1231. addc r11,r11,r8
  1232. adde r12,r12,r9
  1233. addze r10,r0
  1234. #mul_add_c(a[1],b[6],c2,c3,c1);
  1235. $LD r6,`1*$BNSZ`(r4)
  1236. $LD r7,`6*$BNSZ`(r5)
  1237. $UMULL r8,r6,r7
  1238. $UMULH r9,r6,r7
  1239. addc r11,r11,r8
  1240. adde r12,r12,r9
  1241. addze r10,r10
  1242. #mul_add_c(a[2],b[5],c2,c3,c1);
  1243. $LD r6,`2*$BNSZ`(r4)
  1244. $LD r7,`5*$BNSZ`(r5)
  1245. $UMULL r8,r6,r7
  1246. $UMULH r9,r6,r7
  1247. addc r11,r11,r8
  1248. adde r12,r12,r9
  1249. addze r10,r10
  1250. #mul_add_c(a[3],b[4],c2,c3,c1);
  1251. $LD r6,`3*$BNSZ`(r4)
  1252. $LD r7,`4*$BNSZ`(r5)
  1253. $UMULL r8,r6,r7
  1254. $UMULH r9,r6,r7
  1255. addc r11,r11,r8
  1256. adde r12,r12,r9
  1257. addze r10,r10
  1258. #mul_add_c(a[4],b[3],c2,c3,c1);
  1259. $LD r6,`4*$BNSZ`(r4)
  1260. $LD r7,`3*$BNSZ`(r5)
  1261. $UMULL r8,r6,r7
  1262. $UMULH r9,r6,r7
  1263. addc r11,r11,r8
  1264. adde r12,r12,r9
  1265. addze r10,r10
  1266. #mul_add_c(a[5],b[2],c2,c3,c1);
  1267. $LD r6,`5*$BNSZ`(r4)
  1268. $LD r7,`2*$BNSZ`(r5)
  1269. $UMULL r8,r6,r7
  1270. $UMULH r9,r6,r7
  1271. addc r11,r11,r8
  1272. adde r12,r12,r9
  1273. addze r10,r10
  1274. #mul_add_c(a[6],b[1],c2,c3,c1);
  1275. $LD r6,`6*$BNSZ`(r4)
  1276. $LD r7,`1*$BNSZ`(r5)
  1277. $UMULL r8,r6,r7
  1278. $UMULH r9,r6,r7
  1279. addc r11,r11,r8
  1280. adde r12,r12,r9
  1281. addze r10,r10
  1282. #mul_add_c(a[7],b[0],c2,c3,c1);
  1283. $LD r6,`7*$BNSZ`(r4)
  1284. $LD r7,`0*$BNSZ`(r5)
  1285. $UMULL r8,r6,r7
  1286. $UMULH r9,r6,r7
  1287. addc r11,r11,r8
  1288. adde r12,r12,r9
  1289. addze r10,r10
  1290. $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
  1291. #mul_add_c(a[7],b[1],c3,c1,c2);
  1292. $LD r7,`1*$BNSZ`(r5)
  1293. $UMULL r8,r6,r7
  1294. $UMULH r9,r6,r7
  1295. addc r12,r12,r8
  1296. adde r10,r10,r9
  1297. addze r11,r0
  1298. #mul_add_c(a[6],b[2],c3,c1,c2);
  1299. $LD r6,`6*$BNSZ`(r4)
  1300. $LD r7,`2*$BNSZ`(r5)
  1301. $UMULL r8,r6,r7
  1302. $UMULH r9,r6,r7
  1303. addc r12,r12,r8
  1304. adde r10,r10,r9
  1305. addze r11,r11
  1306. #mul_add_c(a[5],b[3],c3,c1,c2);
  1307. $LD r6,`5*$BNSZ`(r4)
  1308. $LD r7,`3*$BNSZ`(r5)
  1309. $UMULL r8,r6,r7
  1310. $UMULH r9,r6,r7
  1311. addc r12,r12,r8
  1312. adde r10,r10,r9
  1313. addze r11,r11
  1314. #mul_add_c(a[4],b[4],c3,c1,c2);
  1315. $LD r6,`4*$BNSZ`(r4)
  1316. $LD r7,`4*$BNSZ`(r5)
  1317. $UMULL r8,r6,r7
  1318. $UMULH r9,r6,r7
  1319. addc r12,r12,r8
  1320. adde r10,r10,r9
  1321. addze r11,r11
  1322. #mul_add_c(a[3],b[5],c3,c1,c2);
  1323. $LD r6,`3*$BNSZ`(r4)
  1324. $LD r7,`5*$BNSZ`(r5)
  1325. $UMULL r8,r6,r7
  1326. $UMULH r9,r6,r7
  1327. addc r12,r12,r8
  1328. adde r10,r10,r9
  1329. addze r11,r11
  1330. #mul_add_c(a[2],b[6],c3,c1,c2);
  1331. $LD r6,`2*$BNSZ`(r4)
  1332. $LD r7,`6*$BNSZ`(r5)
  1333. $UMULL r8,r6,r7
  1334. $UMULH r9,r6,r7
  1335. addc r12,r12,r8
  1336. adde r10,r10,r9
  1337. addze r11,r11
  1338. #mul_add_c(a[1],b[7],c3,c1,c2);
  1339. $LD r6,`1*$BNSZ`(r4)
  1340. $LD r7,`7*$BNSZ`(r5)
  1341. $UMULL r8,r6,r7
  1342. $UMULH r9,r6,r7
  1343. addc r12,r12,r8
  1344. adde r10,r10,r9
  1345. addze r11,r11
  1346. $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
  1347. #mul_add_c(a[2],b[7],c1,c2,c3);
  1348. $LD r6,`2*$BNSZ`(r4)
  1349. $UMULL r8,r6,r7
  1350. $UMULH r9,r6,r7
  1351. addc r10,r10,r8
  1352. adde r11,r11,r9
  1353. addze r12,r0
  1354. #mul_add_c(a[3],b[6],c1,c2,c3);
  1355. $LD r6,`3*$BNSZ`(r4)
  1356. $LD r7,`6*$BNSZ`(r5)
  1357. $UMULL r8,r6,r7
  1358. $UMULH r9,r6,r7
  1359. addc r10,r10,r8
  1360. adde r11,r11,r9
  1361. addze r12,r12
  1362. #mul_add_c(a[4],b[5],c1,c2,c3);
  1363. $LD r6,`4*$BNSZ`(r4)
  1364. $LD r7,`5*$BNSZ`(r5)
  1365. $UMULL r8,r6,r7
  1366. $UMULH r9,r6,r7
  1367. addc r10,r10,r8
  1368. adde r11,r11,r9
  1369. addze r12,r12
  1370. #mul_add_c(a[5],b[4],c1,c2,c3);
  1371. $LD r6,`5*$BNSZ`(r4)
  1372. $LD r7,`4*$BNSZ`(r5)
  1373. $UMULL r8,r6,r7
  1374. $UMULH r9,r6,r7
  1375. addc r10,r10,r8
  1376. adde r11,r11,r9
  1377. addze r12,r12
  1378. #mul_add_c(a[6],b[3],c1,c2,c3);
  1379. $LD r6,`6*$BNSZ`(r4)
  1380. $LD r7,`3*$BNSZ`(r5)
  1381. $UMULL r8,r6,r7
  1382. $UMULH r9,r6,r7
  1383. addc r10,r10,r8
  1384. adde r11,r11,r9
  1385. addze r12,r12
  1386. #mul_add_c(a[7],b[2],c1,c2,c3);
  1387. $LD r6,`7*$BNSZ`(r4)
  1388. $LD r7,`2*$BNSZ`(r5)
  1389. $UMULL r8,r6,r7
  1390. $UMULH r9,r6,r7
  1391. addc r10,r10,r8
  1392. adde r11,r11,r9
  1393. addze r12,r12
  1394. $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
  1395. #mul_add_c(a[7],b[3],c2,c3,c1);
  1396. $LD r7,`3*$BNSZ`(r5)
  1397. $UMULL r8,r6,r7
  1398. $UMULH r9,r6,r7
  1399. addc r11,r11,r8
  1400. adde r12,r12,r9
  1401. addze r10,r0
  1402. #mul_add_c(a[6],b[4],c2,c3,c1);
  1403. $LD r6,`6*$BNSZ`(r4)
  1404. $LD r7,`4*$BNSZ`(r5)
  1405. $UMULL r8,r6,r7
  1406. $UMULH r9,r6,r7
  1407. addc r11,r11,r8
  1408. adde r12,r12,r9
  1409. addze r10,r10
  1410. #mul_add_c(a[5],b[5],c2,c3,c1);
  1411. $LD r6,`5*$BNSZ`(r4)
  1412. $LD r7,`5*$BNSZ`(r5)
  1413. $UMULL r8,r6,r7
  1414. $UMULH r9,r6,r7
  1415. addc r11,r11,r8
  1416. adde r12,r12,r9
  1417. addze r10,r10
  1418. #mul_add_c(a[4],b[6],c2,c3,c1);
  1419. $LD r6,`4*$BNSZ`(r4)
  1420. $LD r7,`6*$BNSZ`(r5)
  1421. $UMULL r8,r6,r7
  1422. $UMULH r9,r6,r7
  1423. addc r11,r11,r8
  1424. adde r12,r12,r9
  1425. addze r10,r10
  1426. #mul_add_c(a[3],b[7],c2,c3,c1);
  1427. $LD r6,`3*$BNSZ`(r4)
  1428. $LD r7,`7*$BNSZ`(r5)
  1429. $UMULL r8,r6,r7
  1430. $UMULH r9,r6,r7
  1431. addc r11,r11,r8
  1432. adde r12,r12,r9
  1433. addze r10,r10
  1434. $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
  1435. #mul_add_c(a[4],b[7],c3,c1,c2);
  1436. $LD r6,`4*$BNSZ`(r4)
  1437. $UMULL r8,r6,r7
  1438. $UMULH r9,r6,r7
  1439. addc r12,r12,r8
  1440. adde r10,r10,r9
  1441. addze r11,r0
  1442. #mul_add_c(a[5],b[6],c3,c1,c2);
  1443. $LD r6,`5*$BNSZ`(r4)
  1444. $LD r7,`6*$BNSZ`(r5)
  1445. $UMULL r8,r6,r7
  1446. $UMULH r9,r6,r7
  1447. addc r12,r12,r8
  1448. adde r10,r10,r9
  1449. addze r11,r11
  1450. #mul_add_c(a[6],b[5],c3,c1,c2);
  1451. $LD r6,`6*$BNSZ`(r4)
  1452. $LD r7,`5*$BNSZ`(r5)
  1453. $UMULL r8,r6,r7
  1454. $UMULH r9,r6,r7
  1455. addc r12,r12,r8
  1456. adde r10,r10,r9
  1457. addze r11,r11
  1458. #mul_add_c(a[7],b[4],c3,c1,c2);
  1459. $LD r6,`7*$BNSZ`(r4)
  1460. $LD r7,`4*$BNSZ`(r5)
  1461. $UMULL r8,r6,r7
  1462. $UMULH r9,r6,r7
  1463. addc r12,r12,r8
  1464. adde r10,r10,r9
  1465. addze r11,r11
  1466. $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
  1467. #mul_add_c(a[7],b[5],c1,c2,c3);
  1468. $LD r7,`5*$BNSZ`(r5)
  1469. $UMULL r8,r6,r7
  1470. $UMULH r9,r6,r7
  1471. addc r10,r10,r8
  1472. adde r11,r11,r9
  1473. addze r12,r0
  1474. #mul_add_c(a[6],b[6],c1,c2,c3);
  1475. $LD r6,`6*$BNSZ`(r4)
  1476. $LD r7,`6*$BNSZ`(r5)
  1477. $UMULL r8,r6,r7
  1478. $UMULH r9,r6,r7
  1479. addc r10,r10,r8
  1480. adde r11,r11,r9
  1481. addze r12,r12
  1482. #mul_add_c(a[5],b[7],c1,c2,c3);
  1483. $LD r6,`5*$BNSZ`(r4)
  1484. $LD r7,`7*$BNSZ`(r5)
  1485. $UMULL r8,r6,r7
  1486. $UMULH r9,r6,r7
  1487. addc r10,r10,r8
  1488. adde r11,r11,r9
  1489. addze r12,r12
  1490. $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
  1491. #mul_add_c(a[6],b[7],c2,c3,c1);
  1492. $LD r6,`6*$BNSZ`(r4)
  1493. $UMULL r8,r6,r7
  1494. $UMULH r9,r6,r7
  1495. addc r11,r11,r8
  1496. adde r12,r12,r9
  1497. addze r10,r0
  1498. #mul_add_c(a[7],b[6],c2,c3,c1);
  1499. $LD r6,`7*$BNSZ`(r4)
  1500. $LD r7,`6*$BNSZ`(r5)
  1501. $UMULL r8,r6,r7
  1502. $UMULH r9,r6,r7
  1503. addc r11,r11,r8
  1504. adde r12,r12,r9
  1505. addze r10,r10
  1506. $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
  1507. #mul_add_c(a[7],b[7],c3,c1,c2);
  1508. $LD r7,`7*$BNSZ`(r5)
  1509. $UMULL r8,r6,r7
  1510. $UMULH r9,r6,r7
  1511. addc r12,r12,r8
  1512. adde r10,r10,r9
  1513. $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
  1514. $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
  1515. bclr BO_ALWAYS,CR0_LT
  1516. .long 0x00000000
  1517. #
  1518. # NOTE: The following label name should be changed to
  1519. # "bn_sub_words" i.e. remove the first dot
  1520. # for the gcc compiler. This should be automatically
  1521. # done in the build
  1522. #
  1523. #
  1524. .align 4
  1525. .bn_sub_words:
  1526. #
  1527. # Handcoded version of bn_sub_words
  1528. #
  1529. #BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1530. #
  1531. # r3 = r
  1532. # r4 = a
  1533. # r5 = b
  1534. # r6 = n
  1535. #
  1536. # Note: No loop unrolling done since this is not a performance
  1537. # critical loop.
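#
# In effect the loop computes the following C (an illustrative sketch,
# 32-bit BN_ULONG case; the name is hypothetical):
#
#	unsigned int bn_sub_words_ref(unsigned int *r, const unsigned int *a,
#					const unsigned int *b, int n)
#	{
#		unsigned int borrow = 0;
#		int i;
#		for (i = 0; i < n; i++) {
#			unsigned long long t =
#				(unsigned long long)a[i] - b[i] - borrow;
#			r[i] = (unsigned int)t;
#			borrow = (unsigned int)(t >> 63);  /* 1 if we went negative */
#		}
#		return borrow;			/* final borrow, 0 or 1 */
#	}
#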
  1538. xor r0,r0,r0 #set r0 = 0
  1539. #
  1540. # check for r6 = 0 AND set carry bit.
  1541. #
  1542. subfc. r7,r0,r6 # If r6 is 0 then result is 0.
  1543. # if r6 > 0 then result !=0
  1544. # In either case carry bit is set.
  1545. bc BO_IF,CR0_EQ,Lppcasm_sub_adios
  1546. addi r4,r4,-$BNSZ
  1547. addi r3,r3,-$BNSZ
  1548. addi r5,r5,-$BNSZ
  1549. mtctr r6
  1550. Lppcasm_sub_mainloop:
  1551. $LDU r7,$BNSZ(r4)
  1552. $LDU r8,$BNSZ(r5)
  1553. subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
  1554. # if carry = 1 this is r7-r8. Else it
  1555. # is r7-r8 -1 as we need.
  1556. $STU r6,$BNSZ(r3)
  1557. bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sub_mainloop
  1558. Lppcasm_sub_adios:
  1559. subfze r3,r0 # if carry bit is set then r3 = 0 else -1
  1560. andi. r3,r3,1 # keep only last bit.
  1561. bclr BO_ALWAYS,CR0_LT
  1562. .long 0x00000000
  1563. #
  1564. # NOTE: The following label name should be changed to
  1565. # "bn_add_words" i.e. remove the first dot
  1566. # for the gcc compiler. This should be automatically
  1567. # done in the build
  1568. #
  1569. .align 4
  1570. .bn_add_words:
  1571. #
  1572. # Handcoded version of bn_add_words
  1573. #
  1574. #BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  1575. #
  1576. # r3 = r
  1577. # r4 = a
  1578. # r5 = b
  1579. # r6 = n
  1580. #
  1581. # Note: No loop unrolling done since this is not a performance
  1582. # critical loop.
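#
# Likewise, a C sketch of the addition loop (illustrative, 32-bit
# BN_ULONG case; hypothetical name):
#
#	unsigned int bn_add_words_ref(unsigned int *r, const unsigned int *a,
#					const unsigned int *b, int n)
#	{
#		unsigned int carry = 0;
#		int i;
#		for (i = 0; i < n; i++) {
#			unsigned long long t =
#				(unsigned long long)a[i] + b[i] + carry;
#			r[i] = (unsigned int)t;
#			carry = (unsigned int)(t >> 32);	/* 0 or 1 */
#		}
#		return carry;
#	}
#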
  1583. xor r0,r0,r0
  1584. #
  1585. # check for r6 = 0. Is this needed?
  1586. #
  1587. addic. r6,r6,0 #test r6 and clear carry bit.
  1588. bc BO_IF,CR0_EQ,Lppcasm_add_adios
  1589. addi r4,r4,-$BNSZ
  1590. addi r3,r3,-$BNSZ
  1591. addi r5,r5,-$BNSZ
  1592. mtctr r6
  1593. Lppcasm_add_mainloop:
  1594. $LDU r7,$BNSZ(r4)
  1595. $LDU r8,$BNSZ(r5)
  1596. adde r8,r7,r8
  1597. $STU r8,$BNSZ(r3)
  1598. bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_add_mainloop
  1599. Lppcasm_add_adios:
  1600. addze r3,r0 #return carry bit.
  1601. bclr BO_ALWAYS,CR0_LT
  1602. .long 0x00000000
  1603. #
  1604. # NOTE: The following label name should be changed to
  1605. # "bn_div_words" i.e. remove the first dot
  1606. # for the gcc compiler. This should be automatically
  1607. # done in the build
  1608. #
  1609. .align 4
  1610. .bn_div_words:
  1611. #
  1612. # This is a cleaned-up version of code generated by
  1613. # the AIX compiler. The only optimization is to use
  1614. # the PPC instruction to count leading zeros instead
  1615. # of a call to num_bits_word. Since this was compiled
  1616. # only at level -O2, we could probably squeeze it some more.
  1617. #
  1618. # r3 = h
  1619. # r4 = l
  1620. # r5 = d
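#
# The routine returns the quotient of the double word (h,l) divided by
# d, assuming h < d.  On a machine with a wide enough integer type that
# is simply (illustrative 32-bit sketch; hypothetical name):
#
#	unsigned int bn_div_words_ref(unsigned int h, unsigned int l,
#					unsigned int d)
#	{
#		unsigned long long n;
#		if (d == 0)
#			return (unsigned int)-1;
#		n = ((unsigned long long)h << 32) | l;
#		return (unsigned int)(n / d);	/* caller ensures h < d */
#	}
#
# The assembly below cannot rely on a double-width divide, so it
# normalizes d and runs two rounds of half-word quotient estimation
# instead.
#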
	$UCMPI	0,r5,0			# compare r5 and 0
	bc	BO_IF_NOT,CR0_EQ,Lppcasm_div1	# proceed if d!=0
	li	r3,-1			# d=0 return -1
	bclr	BO_ALWAYS,CR0_LT
Lppcasm_div1:
	xor	r0,r0,r0		#r0=0
	li	r8,$BITS
	$CNTLZ.	r7,r5			#r7 = num leading 0s in d.
	bc	BO_IF,CR0_EQ,Lppcasm_div2	#proceed if no leading zeros
	subf	r8,r7,r8		#r8 = BN_num_bits_word(d)
	$SHR.	r9,r3,r8		#are there any bits above r8'th?
	$TR	16,r9,r0		#if there're, signal to dump core...
Lppcasm_div2:
	$UCMP	0,r3,r5			#h>=d?
	bc	BO_IF,CR0_LT,Lppcasm_div3	#goto Lppcasm_div3 if not
	subf	r3,r5,r3		#h-=d ;
Lppcasm_div3:				#r7 = BN_BITS2-i. so r7=i
	cmpi	0,0,r7,0		# is (i == 0)?
	bc	BO_IF,CR0_EQ,Lppcasm_div4
	$SHL	r3,r3,r7		# h = (h<< i)
	$SHR	r8,r4,r8		# r8 = (l >> BN_BITS2 -i)
	$SHL	r5,r5,r7		# d<<=i
	or	r3,r3,r8		# h = (h<<i)|(l>>(BN_BITS2-i))
	$SHL	r4,r4,r7		# l <<=i
Lppcasm_div4:
	$SHRI	r9,r5,`$BITS/2`		# r9 = dh
					# dl will be computed when needed
					# as it saves registers.
	li	r6,2			#r6=2
	mtctr	r6			#counter will be in count.
Lppcasm_divouterloop:
	$SHRI	r8,r3,`$BITS/2`		#r8 = (h>>BN_BITS4)
	$SHRI	r11,r4,`$BITS/2`	#r11= (l&BN_MASK2h)>>BN_BITS4
					# compute here for innerloop.
	$UCMP	0,r8,r9			# is (h>>BN_BITS4)==dh
	bc	BO_IF_NOT,CR0_EQ,Lppcasm_div5	# goto Lppcasm_div5 if not
	li	r8,-1
	$CLRU	r8,r8,`$BITS/2`		#q = BN_MASK2l
	b	Lppcasm_div6
Lppcasm_div5:
	$UDIV	r8,r3,r9		#q = h/dh
Lppcasm_div6:
	$UMULL	r12,r9,r8		#th = q*dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl
	$UMULL	r6,r8,r10		#tl = q*dl
Lppcasm_divinnerloop:
	subf	r10,r12,r3		#t = h -th
	$SHRI	r7,r10,`$BITS/2`	#r7= (t &BN_MASK2H), sort of...
	addic.	r7,r7,0			#test if r7 == 0. used below.
					# now want to compute
					# r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
					# the following 2 instructions do that
	$SHLI	r7,r10,`$BITS/2`	# r7 = (t<<BN_BITS4)
	or	r7,r7,r11		# r7|=((l&BN_MASK2h)>>BN_BITS4)
	$UCMP	1,r6,r7			# compare (tl <= r7)
	bc	BO_IF_NOT,CR0_EQ,Lppcasm_divinnerexit
	bc	BO_IF_NOT,CR1_FEX,Lppcasm_divinnerexit
	addi	r8,r8,-1		#q--
	subf	r12,r9,r12		#th -=dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl. t is no longer needed in loop.
	subf	r6,r10,r6		#tl -=dl
	b	Lppcasm_divinnerloop
Lppcasm_divinnerexit:
	$SHRI	r10,r6,`$BITS/2`	#t=(tl>>BN_BITS4)
	$SHLI	r11,r6,`$BITS/2`	#tl=(tl<<BN_BITS4)&BN_MASK2h;
	$UCMP	1,r4,r11		# compare l and tl
	add	r12,r12,r10		# th+=t
	bc	BO_IF_NOT,CR1_FX,Lppcasm_div7	# if (l>=tl) goto Lppcasm_div7
	addi	r12,r12,1		# th++
Lppcasm_div7:
	subf	r11,r11,r4		#r11=l-tl
	$UCMP	1,r3,r12		#compare h and th
	bc	BO_IF_NOT,CR1_FX,Lppcasm_div8	#if (h>=th) goto Lppcasm_div8
	addi	r8,r8,-1		# q--
	add	r3,r5,r3		# h+=d
Lppcasm_div8:
	subf	r12,r12,r3		#r12 = h-th
	$SHLI	r4,r11,`$BITS/2`	#l=(l&BN_MASK2l)<<BN_BITS4
					# want to compute
					# h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
					# the following 2 instructions will do this.
	$INSR	r11,r12,`$BITS/2`,`$BITS/2`	# r11 is the value we want rotated $BITS/2.
	$ROTL	r3,r11,`$BITS/2`	# rotate by $BITS/2 and store in r3
	bc	BO_dCTR_ZERO,CR0_EQ,Lppcasm_div9	#if (count==0) break ;
	$SHLI	r0,r8,`$BITS/2`		#ret =q<<BN_BITS4
	b	Lppcasm_divouterloop
Lppcasm_div9:
	or	r3,r8,r0
	bclr	BO_ALWAYS,CR0_LT
	.long	0x00000000
#
# NOTE: The following label name should be changed to
# "bn_sqr_words" i.e. remove the first dot
# for the gcc compiler. This should be automatically
# done in the build
#
	.align	4
.bn_sqr_words:
#
# Optimized version of bn_sqr_words
#
# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
#
# r3 = r
# r4 = a
# r5 = n
#
# r6 = a[i].
# r7,r8 = product.
#
# No unrolling done here. Not performance critical.
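#
# For reference (an informal sketch, not the C source): each word of a
# is squared and the double-word product is stored in two consecutive
# words of r, i.e. roughly
#
#	for (i = 0; i < n; i++) {
#		r[2*i]   = low  word of a[i]*a[i];
#		r[2*i+1] = high word of a[i]*a[i];
#	}
#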
	addic.	r5,r5,0			#test r5.
	bc	BO_IF,CR0_EQ,Lppcasm_sqr_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	mtctr	r5
Lppcasm_sqr_mainloop:
	#sqr(r[0],r[1],a[0]);
	$LDU	r6,$BNSZ(r4)
	$UMULL	r7,r6,r6
	$UMULH	r8,r6,r6
	$STU	r7,$BNSZ(r3)
	$STU	r8,$BNSZ(r3)
	bc	BO_dCTR_NZERO,CR0_EQ,Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
	bclr	BO_ALWAYS,CR0_LT
	.long	0x00000000
#
# NOTE: The following label name should be changed to
# "bn_mul_words" i.e. remove the first dot
# for the gcc compiler. This should be automatically
# done in the build
#
	.align	4
.bn_mul_words:
#
# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w
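#
# For reference (an informal sketch, not the C source): the carry is
# chained through the whole array and the final carry is returned in
# r3, i.e. roughly
#
#	c = 0;
#	for (i = 0; i < num; i++) {
#		t     = (double word) ap[i]*w + c;
#		rp[i] = low word of t;
#		c     = high word of t;
#	}
#	return c;
#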
	xor	r0,r0,r0
	xor	r12,r12,r12		# used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	bc	BO_IF,CR0_EQ,Lppcasm_mw_REM
	mtctr	r7
Lppcasm_mw_LOOP:
	#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	#addze	r10,r10			#carry is NOT ignored.
					#will be taken care of
					#in second spin below
					#using adde.
	$ST	r9,`0*$BNSZ`(r3)
	#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)
	#mul(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	#mul_add(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12			#this spin we collect carry into
					#r12
	$ST	r11,`3*$BNSZ`(r3)
	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bc	BO_dCTR_NZERO,CR0_EQ,Lppcasm_mw_LOOP
Lppcasm_mw_REM:
	andi.	r5,r5,0x3
	bc	BO_IF,CR0_EQ,Lppcasm_mw_OVER
	#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`0*$BNSZ`(r3)
	addi	r12,r10,0
	addi	r5,r5,-1
	cmpli	0,0,r5,0
	bc	BO_IF,CR0_EQ,Lppcasm_mw_OVER
	#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`1*$BNSZ`(r3)
	addi	r12,r10,0
	addi	r5,r5,-1
	cmpli	0,0,r5,0
	bc	BO_IF,CR0_EQ,Lppcasm_mw_OVER
	#mul_add(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	addi	r12,r10,0
Lppcasm_mw_OVER:
	addi	r3,r12,0
	bclr	BO_ALWAYS,CR0_LT
	.long	0x00000000
#
# NOTE: The following label name should be changed to
# "bn_mul_add_words" i.e. remove the first dot
# for the gcc compiler. This should be automatically
# done in the build
#
	.align	4
.bn_mul_add_words:
#
# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w
#
# empirical evidence suggests that unrolled version performs best!!
#
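#
# For reference (an informal sketch, not the C source): same as
# bn_mul_words above, except that the existing word rp[i] is added
# into each product, i.e. roughly
#
#	c = 0;
#	for (i = 0; i < num; i++) {
#		t     = (double word) ap[i]*w + rp[i] + c;
#		rp[i] = low word of t;
#		c     = high word of t;
#	}
#	return c;
#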
	xor	r0,r0,r0		#r0 = 0
	xor	r12,r12,r12		#r12 = 0. used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	bc	BO_IF,CR0_EQ,Lppcasm_maw_leftover	# if (num < 4) goto Lppcasm_maw_leftover
	mtctr	r7
Lppcasm_maw_mainloop:
	#mul_add(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$LD	r11,`0*$BNSZ`(r3)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12		#r12 is carry.
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
					#the above instruction addze
					#is NOT needed. Carry will NOT
					#be ignored. It's not affected
					#by multiply and will be collected
					#in the next spin
	$ST	r9,`0*$BNSZ`(r3)
	#mul_add(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$LD	r9,`1*$BNSZ`(r3)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10		#r10 is carry.
	addze	r12,r12
	addc	r11,r11,r9
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)
	#mul_add(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$LD	r11,`2*$BNSZ`(r3)
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	#mul_add(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$LD	r9,`3*$BNSZ`(r3)
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12
	addc	r11,r11,r9
	addze	r12,r12
	$ST	r11,`3*$BNSZ`(r3)
	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bc	BO_dCTR_NZERO,CR0_EQ,Lppcasm_maw_mainloop
Lppcasm_maw_leftover:
	andi.	r5,r5,0x3
	bc	BO_IF,CR0_EQ,Lppcasm_maw_adios
	addi	r3,r3,-$BNSZ
	addi	r4,r4,-$BNSZ
	#mul_add(rp[0],ap[0],w,c1);
	mtctr	r5
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)
	bc	BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
	#mul_add(rp[1],ap[1],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)
	bc	BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
	#mul_add(rp[2],ap[2],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)
Lppcasm_maw_adios:
	addi	r3,r12,0
	bclr	BO_ALWAYS,CR0_LT
	.long	0x00000000
	.align	4
EOF
	$data =~ s/\`([^\`]*)\`/eval $1/gem;
	# if some assembler chokes on some simplified mnemonic,
	# this is the spot to fix it up, e.g.:
	# GNU as doesn't seem to accept cmplw, 32-bit unsigned compare
	$data =~ s/^(\s*)cmplw(\s+)([^,]+),(.*)/$1cmpl$2$3,0,$4/gm;
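	# for example, the substitution above rewrites a generated line
	# such as "cmplw 0,r3,r5" into "cmpl 0,0,r3,r5" (illustrative
	# operands only; the actual lines come from the code above)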
	# assembler X doesn't accept li, load immediate value
	#$data =~ s/^(\s*)li(\s+)([^,]+),(.*)/$1addi$2$3,0,$4/gm;
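	# (if enabled, that substitution would rewrite e.g. "li r6,2"
	# into "addi r6,0,2")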
	return($data);
}