asm.c

  1. /* asm.c
  2. *
  3. * Copyright (C) 2006-2022 wolfSSL Inc.
  4. *
  5. * This file is part of wolfSSL.
  6. *
  7. * wolfSSL is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * wolfSSL is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
  20. */
  21. #ifdef HAVE_CONFIG_H
  22. #include <config.h>
  23. #endif
  24. #include <wolfssl/wolfcrypt/settings.h>
  25. /*
  26. * Based on public domain TomsFastMath 0.10 by Tom St Denis, tomstdenis@iahu.ca,
  27. * http://math.libtomcrypt.com
  28. */
  29. /******************************************************************/
  30. /* fp_montgomery_reduce.c asm or generic */
  31. /* Each platform needs to query cpuid to see if BMI2 and ADX (MULX, ADCX,
  32. * ADOX) are supported. Also, let's set up a macro for proper linkage w/o ABI conflicts
  33. */
  34. #if defined(HAVE_INTEL_MULX)
  35. #ifndef _MSC_VER
  36. #define cpuid(reg, leaf, sub)\
  37. __asm__ __volatile__ ("cpuid":\
  38. "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
  39. "a" (leaf), "c"(sub));
  40. #define XASM_LINK(f) asm(f)
  41. #else
  42. #include <intrin.h>
  43. #define cpuid(a,b,c) __cpuidex((int*)a,b,c)
  44. #define XASM_LINK(f)
  45. #endif /* _MSC_VER */
  46. #define EAX 0
  47. #define EBX 1
  48. #define ECX 2
  49. #define EDX 3
  50. #define CPUID_AVX1 0x1
  51. #define CPUID_AVX2 0x2
  52. #define CPUID_RDRAND 0x4
  53. #define CPUID_RDSEED 0x8
  54. #define CPUID_BMI2 0x10 /* MULX, RORX */
  55. #define CPUID_ADX 0x20 /* ADCX, ADOX */
  56. #define IS_INTEL_AVX1 (cpuid_flags&CPUID_AVX1)
  57. #define IS_INTEL_AVX2 (cpuid_flags&CPUID_AVX2)
  58. #define IS_INTEL_BMI2 (cpuid_flags&CPUID_BMI2)
  59. #define IS_INTEL_ADX (cpuid_flags&CPUID_ADX)
  60. #define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
  61. #define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
  62. #define SET_FLAGS
  63. static word32 cpuid_check = 0 ;
  64. static word32 cpuid_flags = 0 ;
  65. static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
  66. int got_intel_cpu = 0;
  67. int got_amd_cpu = 0;
  68. unsigned int reg[5];
  69. reg[4] = '\0' ;
  70. cpuid(reg, 0, 0);
  71. /* check for intel cpu */
  72. if( memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
  73. memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
  74. memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
  75. got_intel_cpu = 1;
  76. }
  77. /* check for AMD cpu */
  78. if( memcmp((char *)&(reg[EBX]), "Auth", 4) == 0 &&
  79. memcmp((char *)&(reg[EDX]), "enti", 4) == 0 &&
  80. memcmp((char *)&(reg[ECX]), "cAMD", 4) == 0) {
  81. got_amd_cpu = 1;
  82. }
  83. if (got_intel_cpu || got_amd_cpu) {
  84. cpuid(reg, leaf, sub);
  85. return((reg[num]>>bit)&0x1) ;
  86. }
  87. return 0 ;
  88. }
  89. WC_INLINE static int set_cpuid_flags(void) {
  90. if(cpuid_check == 0) {
  91. if(cpuid_flag(7, 0, EBX, 8)){ cpuid_flags |= CPUID_BMI2 ; }
  92. if(cpuid_flag(7, 0, EBX,19)){ cpuid_flags |= CPUID_ADX ; }
  93. cpuid_check = 1 ;
  94. return 0 ;
  95. }
  96. return 1 ;
  97. }
  98. #define RETURN return
  99. #define IF_HAVE_INTEL_MULX(func, ret) \
  100. if(cpuid_check==0)set_cpuid_flags() ; \
  101. if(IS_INTEL_BMI2 && IS_INTEL_ADX){ func; ret ; }
  102. #else
  103. #define IF_HAVE_INTEL_MULX(func, ret)
  104. #endif
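/* Usage sketch (illustrative): a caller in tfm.c can gate a MULX/ADX code path
 * on the runtime cpuid result and fall through to the generic code otherwise.
 * The function name fp_mul_comba_mulx is assumed here for illustration only.
 *
 *   void fp_mul_comba(fp_int *A, fp_int *B, fp_int *C)
 *   {
 *       IF_HAVE_INTEL_MULX(fp_mul_comba_mulx(A, B, C), RETURN);
 *       ... generic comba multiply ...
 *   }
 */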
  105. #if defined(TFM_X86) && !defined(TFM_SSE2)
  106. /* x86-32 code */
  107. #define MONT_START
  108. #define MONT_FINI
  109. #define LOOP_END
  110. #define LOOP_START \
  111. mu = c[x] * mp
  112. #define INNERMUL \
  113. __asm__( \
  114. "movl %5,%%eax \n\t" \
  115. "mull %4 \n\t" \
  116. "addl %1,%%eax \n\t" \
  117. "adcl $0,%%edx \n\t" \
  118. "addl %%eax,%0 \n\t" \
  119. "adcl $0,%%edx \n\t" \
  120. "movl %%edx,%1 \n\t" \
  121. :"=g"(_c[LO]), "=r"(cy) \
  122. :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
  123. : "%eax", "%edx", "cc")
  124. #define PROPCARRY \
  125. __asm__( \
  126. "addl %1,%0 \n\t" \
  127. "setb %%al \n\t" \
  128. "movzbl %%al,%1 \n\t" \
  129. :"=g"(_c[LO]), "=r"(cy) \
  130. :"0"(_c[LO]), "1"(cy) \
  131. : "%eax", "cc")
  132. /******************************************************************/
  133. #elif defined(TFM_X86_64)
  134. /* x86-64 code */
  135. #define MONT_START
  136. #define MONT_FINI
  137. #define LOOP_END
  138. #define LOOP_START \
  139. mu = c[x] * mp
  140. #define INNERMUL \
  141. __asm__( \
  142. "movq %5,%%rax \n\t" \
  143. "mulq %4 \n\t" \
  144. "addq %1,%%rax \n\t" \
  145. "adcq $0,%%rdx \n\t" \
  146. "addq %%rax,%0 \n\t" \
  147. "adcq $0,%%rdx \n\t" \
  148. "movq %%rdx,%1 \n\t" \
  149. :"=g"(_c[LO]), "=r"(cy) \
  150. :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
  151. : "%rax", "%rdx", "cc")
  152. #if defined(HAVE_INTEL_MULX)
  153. #define MULX_INNERMUL8(x,y,z,cy) \
  154. __asm__ volatile ( \
  155. "movq %[yn], %%rdx\n\t" \
  156. "xorq %%rcx, %%rcx\n\t" \
  157. "movq 0(%[c]), %%r8\n\t" \
  158. "movq 8(%[c]), %%r9\n\t" \
  159. "movq 16(%[c]), %%r10\n\t" \
  160. "movq 24(%[c]), %%r11\n\t" \
  161. "movq 32(%[c]), %%r12\n\t" \
  162. "movq 40(%[c]), %%r13\n\t" \
  163. "movq 48(%[c]), %%r14\n\t" \
  164. "movq 56(%[c]), %%r15\n\t" \
  165. \
  166. "mulx 0(%[xp]), %%rax, %%rcx\n\t" \
  167. "adcxq %[cy], %%r8\n\t" \
  168. "adoxq %%rax, %%r8\n\t" \
  169. "mulx 8(%[xp]), %%rax, %[cy]\n\t" \
  170. "adcxq %%rcx, %%r9\n\t" \
  171. "adoxq %%rax, %%r9\n\t" \
  172. "mulx 16(%[xp]), %%rax, %%rcx\n\t" \
  173. "adcxq %[cy], %%r10\n\t" \
  174. "adoxq %%rax, %%r10\n\t" \
  175. "mulx 24(%[xp]), %%rax, %[cy]\n\t" \
  176. "adcxq %%rcx, %%r11\n\t" \
  177. "adoxq %%rax, %%r11\n\t" \
  178. "mulx 32(%[xp]), %%rax, %%rcx\n\t" \
  179. "adcxq %[cy], %%r12\n\t" \
  180. "adoxq %%rax, %%r12\n\t" \
  181. "mulx 40(%[xp]), %%rax, %[cy]\n\t" \
  182. "adcxq %%rcx, %%r13\n\t" \
  183. "adoxq %%rax, %%r13\n\t" \
  184. "mulx 48(%[xp]), %%rax, %%rcx\n\t" \
  185. "adcxq %[cy], %%r14\n\t" \
  186. "adoxq %%rax, %%r14\n\t" \
  187. "adcxq %%rcx, %%r15\n\t" \
  188. "mulx 56(%[xp]), %%rax, %[cy]\n\t" \
  189. "movq $0, %%rdx\n\t" \
  190. "adoxq %%rdx, %%rax\n\t" \
  191. "adcxq %%rdx, %[cy]\n\t" \
  192. "adoxq %%rdx, %[cy]\n\t" \
  193. "addq %%rax, %%r15\n\t" \
  194. "adcq $0, %[cy]\n\t" \
  195. \
  196. "movq %%r8, 0(%[c])\n\t" \
  197. "movq %%r9, 8(%[c])\n\t" \
  198. "movq %%r10, 16(%[c])\n\t" \
  199. "movq %%r11, 24(%[c])\n\t" \
  200. "movq %%r12, 32(%[c])\n\t" \
  201. "movq %%r13, 40(%[c])\n\t" \
  202. "movq %%r14, 48(%[c])\n\t" \
  203. "movq %%r15, 56(%[c])\n\t" \
  204. : [cy] "+r" (cy) \
  205. : [xp] "r" (x), [c] "r" (c_mulx), [yn] "rm" (y) \
  206. :"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", \
  207. "%rdx", "%rax", "%rcx" \
  208. )
  209. #define INNERMUL8_MULX \
  210. {\
  211. MULX_INNERMUL8(tmpm, mu, _c, cy);\
  212. }
  213. #endif
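/* In portable terms, the MULX_INNERMUL8 block above adds mu times an 8-digit
 * window of tmpm into the accumulator window c_mulx, folding in the incoming
 * carry and leaving the outgoing carry in cy; ADCX/ADOX keep two independent
 * carry chains live so the eight multiplies can overlap. A rough C sketch
 * (loop index k introduced only for illustration):
 *
 *   fp_word t;
 *   for (k = 0; k < 8; k++) {
 *       t = (fp_word)c_mulx[k] + (fp_word)mu * (fp_word)tmpm[k] + cy;
 *       c_mulx[k] = (fp_digit)t;
 *       cy = (fp_digit)(t >> DIGIT_BIT);
 *   }
 */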
  214. #define INNERMUL8 \
  215. __asm__( \
  216. "movq 0(%5),%%rax \n\t" \
  217. "movq 0(%2),%%r10 \n\t" \
  218. "movq 0x8(%5),%%r11 \n\t" \
  219. "mulq %4 \n\t" \
  220. "addq %%r10,%%rax \n\t" \
  221. "adcq $0,%%rdx \n\t" \
  222. "movq 0x8(%2),%%r10 \n\t" \
  223. "addq %3,%%rax \n\t" \
  224. "adcq $0,%%rdx \n\t" \
  225. "movq %%rax,0(%0) \n\t" \
  226. "movq %%rdx,%1 \n\t" \
  227. \
  228. "movq %%r11,%%rax \n\t" \
  229. "movq 0x10(%5),%%r11 \n\t" \
  230. "mulq %4 \n\t" \
  231. "addq %%r10,%%rax \n\t" \
  232. "adcq $0,%%rdx \n\t" \
  233. "movq 0x10(%2),%%r10 \n\t" \
  234. "addq %3,%%rax \n\t" \
  235. "adcq $0,%%rdx \n\t" \
  236. "movq %%rax,0x8(%0) \n\t" \
  237. "movq %%rdx,%1 \n\t" \
  238. \
  239. "movq %%r11,%%rax \n\t" \
  240. "movq 0x18(%5),%%r11 \n\t" \
  241. "mulq %4 \n\t" \
  242. "addq %%r10,%%rax \n\t" \
  243. "adcq $0,%%rdx \n\t" \
  244. "movq 0x18(%2),%%r10 \n\t" \
  245. "addq %3,%%rax \n\t" \
  246. "adcq $0,%%rdx \n\t" \
  247. "movq %%rax,0x10(%0) \n\t" \
  248. "movq %%rdx,%1 \n\t" \
  249. \
  250. "movq %%r11,%%rax \n\t" \
  251. "movq 0x20(%5),%%r11 \n\t" \
  252. "mulq %4 \n\t" \
  253. "addq %%r10,%%rax \n\t" \
  254. "adcq $0,%%rdx \n\t" \
  255. "movq 0x20(%2),%%r10 \n\t" \
  256. "addq %3,%%rax \n\t" \
  257. "adcq $0,%%rdx \n\t" \
  258. "movq %%rax,0x18(%0) \n\t" \
  259. "movq %%rdx,%1 \n\t" \
  260. \
  261. "movq %%r11,%%rax \n\t" \
  262. "movq 0x28(%5),%%r11 \n\t" \
  263. "mulq %4 \n\t" \
  264. "addq %%r10,%%rax \n\t" \
  265. "adcq $0,%%rdx \n\t" \
  266. "movq 0x28(%2),%%r10 \n\t" \
  267. "addq %3,%%rax \n\t" \
  268. "adcq $0,%%rdx \n\t" \
  269. "movq %%rax,0x20(%0) \n\t" \
  270. "movq %%rdx,%1 \n\t" \
  271. \
  272. "movq %%r11,%%rax \n\t" \
  273. "movq 0x30(%5),%%r11 \n\t" \
  274. "mulq %4 \n\t" \
  275. "addq %%r10,%%rax \n\t" \
  276. "adcq $0,%%rdx \n\t" \
  277. "movq 0x30(%2),%%r10 \n\t" \
  278. "addq %3,%%rax \n\t" \
  279. "adcq $0,%%rdx \n\t" \
  280. "movq %%rax,0x28(%0) \n\t" \
  281. "movq %%rdx,%1 \n\t" \
  282. \
  283. "movq %%r11,%%rax \n\t" \
  284. "movq 0x38(%5),%%r11 \n\t" \
  285. "mulq %4 \n\t" \
  286. "addq %%r10,%%rax \n\t" \
  287. "adcq $0,%%rdx \n\t" \
  288. "movq 0x38(%2),%%r10 \n\t" \
  289. "addq %3,%%rax \n\t" \
  290. "adcq $0,%%rdx \n\t" \
  291. "movq %%rax,0x30(%0) \n\t" \
  292. "movq %%rdx,%1 \n\t" \
  293. \
  294. "movq %%r11,%%rax \n\t" \
  295. "mulq %4 \n\t" \
  296. "addq %%r10,%%rax \n\t" \
  297. "adcq $0,%%rdx \n\t" \
  298. "addq %3,%%rax \n\t" \
  299. "adcq $0,%%rdx \n\t" \
  300. "movq %%rax,0x38(%0) \n\t" \
  301. "movq %%rdx,%1 \n\t" \
  302. \
  303. :"=r"(_c), "=r"(cy) \
  304. : "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
  305. : "%rax", "%rdx", "%r10", "%r11", "cc")
  306. #define PROPCARRY \
  307. __asm__( \
  308. "addq %1,%0 \n\t" \
  309. "setb %%al \n\t" \
  310. "movzbq %%al,%1 \n\t" \
  311. :"=g"(_c[LO]), "=r"(cy) \
  312. :"0"(_c[LO]), "1"(cy) \
  313. : "%rax", "cc")
  314. /******************************************************************/
  315. #elif defined(TFM_SSE2)
  316. /* SSE2 code (assumes 32-bit fp_digits) */
  317. /* XMM register assignments:
  318. * xmm0 *tmpm++, then Mu * (*tmpm++)
  319. * xmm1 c[x], then Mu
  320. * xmm2 mp
  321. * xmm3 cy
  322. * xmm4 _c[LO]
  323. */
  324. #define MONT_START \
  325. __asm__("movd %0,%%mm2"::"g"(mp))
  326. #define MONT_FINI \
  327. __asm__("emms")
  328. #define LOOP_START \
  329. __asm__( \
  330. "movd %0,%%mm1 \n\t" \
  331. "pxor %%mm3,%%mm3 \n\t" \
  332. "pmuludq %%mm2,%%mm1 \n\t" \
  333. :: "g"(c[x]))
  334. /* pmuludq on mmx registers does a 32x32->64 multiply. */
  335. #define INNERMUL \
  336. __asm__( \
  337. "movd %1,%%mm4 \n\t" \
  338. "movd %2,%%mm0 \n\t" \
  339. "paddq %%mm4,%%mm3 \n\t" \
  340. "pmuludq %%mm1,%%mm0 \n\t" \
  341. "paddq %%mm0,%%mm3 \n\t" \
  342. "movd %%mm3,%0 \n\t" \
  343. "psrlq $32, %%mm3 \n\t" \
  344. :"=g"(_c[LO]) : "0"(_c[LO]), "g"(*tmpm++) );
  345. #define INNERMUL8 \
  346. __asm__( \
  347. "movd 0(%1),%%mm4 \n\t" \
  348. "movd 0(%2),%%mm0 \n\t" \
  349. "paddq %%mm4,%%mm3 \n\t" \
  350. "pmuludq %%mm1,%%mm0 \n\t" \
  351. "movd 4(%2),%%mm5 \n\t" \
  352. "paddq %%mm0,%%mm3 \n\t" \
  353. "movd 4(%1),%%mm6 \n\t" \
  354. "movd %%mm3,0(%0) \n\t" \
  355. "psrlq $32, %%mm3 \n\t" \
  356. \
  357. "paddq %%mm6,%%mm3 \n\t" \
  358. "pmuludq %%mm1,%%mm5 \n\t" \
  359. "movd 8(%2),%%mm6 \n\t" \
  360. "paddq %%mm5,%%mm3 \n\t" \
  361. "movd 8(%1),%%mm7 \n\t" \
  362. "movd %%mm3,4(%0) \n\t" \
  363. "psrlq $32, %%mm3 \n\t" \
  364. \
  365. "paddq %%mm7,%%mm3 \n\t" \
  366. "pmuludq %%mm1,%%mm6 \n\t" \
  367. "movd 12(%2),%%mm7 \n\t" \
  368. "paddq %%mm6,%%mm3 \n\t" \
  369. "movd 12(%1),%%mm5 \n\t" \
  370. "movd %%mm3,8(%0) \n\t" \
  371. "psrlq $32, %%mm3 \n\t" \
  372. \
  373. "paddq %%mm5,%%mm3 \n\t" \
  374. "pmuludq %%mm1,%%mm7 \n\t" \
  375. "movd 16(%2),%%mm5 \n\t" \
  376. "paddq %%mm7,%%mm3 \n\t" \
  377. "movd 16(%1),%%mm6 \n\t" \
  378. "movd %%mm3,12(%0) \n\t" \
  379. "psrlq $32, %%mm3 \n\t" \
  380. \
  381. "paddq %%mm6,%%mm3 \n\t" \
  382. "pmuludq %%mm1,%%mm5 \n\t" \
  383. "movd 20(%2),%%mm6 \n\t" \
  384. "paddq %%mm5,%%mm3 \n\t" \
  385. "movd 20(%1),%%mm7 \n\t" \
  386. "movd %%mm3,16(%0) \n\t" \
  387. "psrlq $32, %%mm3 \n\t" \
  388. \
  389. "paddq %%mm7,%%mm3 \n\t" \
  390. "pmuludq %%mm1,%%mm6 \n\t" \
  391. "movd 24(%2),%%mm7 \n\t" \
  392. "paddq %%mm6,%%mm3 \n\t" \
  393. "movd 24(%1),%%mm5 \n\t" \
  394. "movd %%mm3,20(%0) \n\t" \
  395. "psrlq $32, %%mm3 \n\t" \
  396. \
  397. "paddq %%mm5,%%mm3 \n\t" \
  398. "pmuludq %%mm1,%%mm7 \n\t" \
  399. "movd 28(%2),%%mm5 \n\t" \
  400. "paddq %%mm7,%%mm3 \n\t" \
  401. "movd 28(%1),%%mm6 \n\t" \
  402. "movd %%mm3,24(%0) \n\t" \
  403. "psrlq $32, %%mm3 \n\t" \
  404. \
  405. "paddq %%mm6,%%mm3 \n\t" \
  406. "pmuludq %%mm1,%%mm5 \n\t" \
  407. "paddq %%mm5,%%mm3 \n\t" \
  408. "movd %%mm3,28(%0) \n\t" \
  409. "psrlq $32, %%mm3 \n\t" \
  410. :"=r"(_c) : "0"(_c), "r"(tmpm) );
  411. /* TAO switched tmpm from "g" to "r" after gcc tried to index the indexed stack
  412. pointer */
  413. #define LOOP_END \
  414. __asm__( "movd %%mm3,%0 \n" :"=r"(cy))
  415. #define PROPCARRY \
  416. __asm__( \
  417. "addl %1,%0 \n\t" \
  418. "setb %%al \n\t" \
  419. "movzbl %%al,%1 \n\t" \
  420. :"=g"(_c[LO]), "=r"(cy) \
  421. :"0"(_c[LO]), "1"(cy) \
  422. : "%eax", "cc")
  423. /******************************************************************/
  424. #elif defined(TFM_ARM)
  425. /* ARMv4 code */
  426. #define MONT_START
  427. #define MONT_FINI
  428. #define LOOP_END
  429. #define LOOP_START \
  430. mu = c[x] * mp
  431. #ifdef __thumb__
  432. #define INNERMUL \
  433. __asm__( \
  434. " LDR r0,%1 \n\t" \
  435. " ADDS r0,r0,%0 \n\t" \
  436. " ITE CS \n\t" \
  437. " MOVCS %0,#1 \n\t" \
  438. " MOVCC %0,#0 \n\t" \
  439. " UMLAL r0,%0,%3,%4 \n\t" \
  440. " STR r0,%1 \n\t" \
  441. :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0]):"r0","cc");
  442. #define PROPCARRY \
  443. __asm__( \
  444. " LDR r0,%1 \n\t" \
  445. " ADDS r0,r0,%0 \n\t" \
  446. " STR r0,%1 \n\t" \
  447. " ITE CS \n\t" \
  448. " MOVCS %0,#1 \n\t" \
  449. " MOVCC %0,#0 \n\t" \
  450. :"=r"(cy),"=m"(_c[0]):"0"(cy),"m"(_c[0]):"r0","cc");
  451. /* TAO thumb mode uses ite (if then else) to detect carry directly
  452. * fixed unmatched constraint warning by changing 1 to m */
  453. #else /* __thumb__ */
  454. #define INNERMUL \
  455. __asm__( \
  456. " LDR r0,%1 \n\t" \
  457. " ADDS r0,r0,%0 \n\t" \
  458. " MOVCS %0,#1 \n\t" \
  459. " MOVCC %0,#0 \n\t" \
  460. " UMLAL r0,%0,%3,%4 \n\t" \
  461. " STR r0,%1 \n\t" \
  462. :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc");
  463. #define PROPCARRY \
  464. __asm__( \
  465. " LDR r0,%1 \n\t" \
  466. " ADDS r0,r0,%0 \n\t" \
  467. " STR r0,%1 \n\t" \
  468. " MOVCS %0,#1 \n\t" \
  469. " MOVCC %0,#0 \n\t" \
  470. :"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","cc");
  471. #endif /* __thumb__ */
  472. #elif defined(TFM_PPC32)
  473. /* PPC32 */
  474. #define MONT_START
  475. #define MONT_FINI
  476. #define LOOP_END
  477. #define LOOP_START \
  478. mu = c[x] * mp
  479. #define INNERMUL \
  480. __asm__( \
  481. " mullw 16,%3,%4 \n\t" \
  482. " mulhwu 17,%3,%4 \n\t" \
  483. " addc 16,16,%2 \n\t" \
  484. " addze 17,17 \n\t" \
  485. " addc %1,16,%5 \n\t" \
  486. " addze %0,17 \n\t" \
  487. :"=r"(cy),"=r"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "cc"); ++tmpm;
  488. #define PROPCARRY \
  489. __asm__( \
  490. " addc %1,%3,%2 \n\t" \
  491. " xor %0,%2,%2 \n\t" \
  492. " addze %0,%2 \n\t" \
  493. :"=r"(cy),"=r"(_c[0]):"0"(cy),"1"(_c[0]):"cc");
  494. #elif defined(TFM_PPC64)
  495. /* PPC64 */
  496. #define MONT_START
  497. #define MONT_FINI
  498. #define LOOP_END
  499. #define LOOP_START \
  500. mu = c[x] * mp
  501. #define INNERMUL \
  502. __asm__( \
  503. " mulld r16,%3,%4 \n\t" \
  504. " mulhdu r17,%3,%4 \n\t" \
  505. " addc r16,16,%0 \n\t" \
  506. " addze r17,r17 \n\t" \
  507. " ldx r18,0,%1 \n\t" \
  508. " addc r16,r16,r18 \n\t" \
  509. " addze %0,r17 \n\t" \
  510. " sdx r16,0,%1 \n\t" \
  511. :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm;
  512. #define PROPCARRY \
  513. __asm__( \
  514. " ldx r16,0,%1 \n\t" \
  515. " addc r16,r16,%0 \n\t" \
  516. " sdx r16,0,%1 \n\t" \
  517. " xor %0,%0,%0 \n\t" \
  518. " addze %0,%0 \n\t" \
  519. :"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc");
  520. /******************************************************************/
  521. #elif defined(TFM_AVR32)
  522. /* AVR32 */
  523. #define MONT_START
  524. #define MONT_FINI
  525. #define LOOP_END
  526. #define LOOP_START \
  527. mu = c[x] * mp
  528. #define INNERMUL \
  529. __asm__( \
  530. " ld.w r2,%1 \n\t" \
  531. " add r2,%0 \n\t" \
  532. " eor r3,r3 \n\t" \
  533. " acr r3 \n\t" \
  534. " macu.d r2,%3,%4 \n\t" \
  535. " st.w %1,r2 \n\t" \
  536. " mov %0,r3 \n\t" \
  537. :"=r"(cy),"=r"(_c):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c):"r2","r3");
  538. #define PROPCARRY \
  539. __asm__( \
  540. " ld.w r2,%1 \n\t" \
  541. " add r2,%0 \n\t" \
  542. " st.w %1,r2 \n\t" \
  543. " eor %0,%0 \n\t" \
  544. " acr %0 \n\t" \
  545. :"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc");
  546. /******************************************************************/
  547. #elif defined(TFM_MIPS)
  548. /* MIPS */
  549. #define MONT_START
  550. #define MONT_FINI
  551. #define LOOP_END
  552. #define LOOP_START \
  553. mu = c[x] * mp
  554. #define INNERMUL \
  555. __asm__( \
  556. " multu %3,%4 \n\t" \
  557. " mflo $12 \n\t" \
  558. " mfhi $13 \n\t" \
  559. " addu $12,$12,%0 \n\t" \
  560. " sltu $10,$12,%0 \n\t" \
  561. " addu $13,$13,$10 \n\t" \
  562. " lw $10,%1 \n\t" \
  563. " addu $12,$12,$10 \n\t" \
  564. " sltu $10,$12,$10 \n\t" \
  565. " addu %0,$13,$10 \n\t" \
  566. " sw $12,%1 \n\t" \
  567. :"+r"(cy),"+m"(_c[0]):""(cy),"r"(mu),"r"(tmpm[0]),""(_c[0]):"$10","$12","$13"); ++tmpm;
  568. #define PROPCARRY \
  569. __asm__( \
  570. " lw $10,%1 \n\t" \
  571. " addu $10,$10,%0 \n\t" \
  572. " sw $10,%1 \n\t" \
  573. " sltu %0,$10,%0 \n\t" \
  574. :"+r"(cy),"+m"(_c[0]):""(cy),""(_c[0]):"$10");
  575. /******************************************************************/
  576. #else
  577. /* ISO C code */
  578. #define MONT_START
  579. #define MONT_FINI
  580. #define LOOP_END
  581. #define LOOP_START \
  582. mu = c[x] * mp
  583. #define INNERMUL \
  584. do { fp_word t; \
  585. t = ((fp_word)_c[0] + (fp_word)cy) + \
  586. (((fp_word)mu) * ((fp_word)*tmpm++)); \
  587. _c[0] = (fp_digit)t; \
  588. cy = (fp_digit)(t >> DIGIT_BIT); \
  589. } while (0)
  590. #define PROPCARRY \
  591. do { fp_digit t = _c[0] += cy; cy = (t < cy); } while (0)
  592. #endif
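/* Usage sketch (illustrative): fp_montgomery_reduce() consumes the macro set
 * above roughly as follows, where pa is the modulus length in digits, c is the
 * working accumulator and tmpm walks the modulus digits. The loop shape is a
 * sketch of the TomsFastMath structure, not the exact wolfSSL source.
 *
 *   MONT_START;
 *   for (x = 0; x < pa; x++) {
 *       fp_digit cy = 0;
 *       LOOP_START;                        (mu = c[x] * mp)
 *       _c   = c + x;
 *       tmpm = m->dp;
 *       for (y = 0; y < pa; y++) {
 *           INNERMUL;                      (c[x+y] += mu * m[y] + cy)
 *           ++_c;
 *       }
 *       LOOP_END;
 *       while (cy) {
 *           PROPCARRY;                     (push the leftover carry upward)
 *           ++_c;
 *       }
 *   }
 *   MONT_FINI;
 */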
  593. /******************************************************************/
  594. #define LO 0
  595. /* end fp_montgomery_reduce.c asm */
  596. /* start fp_sqr_comba.c asm */
  597. #if defined(TFM_X86)
  598. /* x86-32 optimized */
  599. #define COMBA_START
  600. #define CLEAR_CARRY \
  601. c0 = c1 = c2 = 0;
  602. #define COMBA_STORE(x) \
  603. x = c0;
  604. #define COMBA_STORE2(x) \
  605. x = c1;
  606. #define CARRY_FORWARD \
  607. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  608. #define COMBA_FINI
  609. #define SQRADD(i, j) \
  610. __asm__( \
  611. "movl %3,%%eax \n\t" \
  612. "mull %%eax \n\t" \
  613. "addl %%eax,%0 \n\t" \
  614. "adcl %%edx,%1 \n\t" \
  615. "adcl $0,%2 \n\t" \
  616. :"+m"(c0), "+m"(c1), "+m"(c2) \
  617. : "m"(i) \
  618. :"%eax","%edx","cc");
  619. #define SQRADD2(i, j) \
  620. __asm__( \
  621. "movl %3,%%eax \n\t" \
  622. "mull %4 \n\t" \
  623. "addl %%eax,%0 \n\t" \
  624. "adcl %%edx,%1 \n\t" \
  625. "adcl $0,%2 \n\t" \
  626. "addl %%eax,%0 \n\t" \
  627. "adcl %%edx,%1 \n\t" \
  628. "adcl $0,%2 \n\t" \
  629. :"+m"(c0), "+m"(c1), "+m"(c2) \
  630. : "m"(i), "m"(j) \
  631. :"%eax","%edx", "cc");
  632. #define SQRADDSC(i, j) \
  633. __asm__( \
  634. "movl %3,%%eax \n\t" \
  635. "mull %4 \n\t" \
  636. "movl %%eax,%0 \n\t" \
  637. "movl %%edx,%1 \n\t" \
  638. "xorl %2,%2 \n\t" \
  639. :"=r"(sc0), "=r"(sc1), "=r"(sc2) \
  640. : "g"(i), "g"(j) \
  641. :"%eax","%edx","cc");
  642. #define SQRADDAC(i, j) \
  643. __asm__( \
  644. "movl %6,%%eax \n\t" \
  645. "mull %7 \n\t" \
  646. "addl %%eax,%0 \n\t" \
  647. "adcl %%edx,%1 \n\t" \
  648. "adcl $0,%2 \n\t" \
  649. :"=r"(sc0), "=r"(sc1), "=r"(sc2) \
  650. : "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) \
  651. :"%eax","%edx","cc");
  652. #define SQRADDDB \
  653. __asm__( \
  654. "addl %6,%0 \n\t" \
  655. "adcl %7,%1 \n\t" \
  656. "adcl %8,%2 \n\t" \
  657. "addl %6,%0 \n\t" \
  658. "adcl %7,%1 \n\t" \
  659. "adcl %8,%2 \n\t" \
  660. :"=r"(c0), "=r"(c1), "=r"(c2) \
  661. : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), \
  662. "r"(sc2) \
  663. : "cc");
  664. #elif defined(TFM_X86_64)
  665. /* x86-64 optimized */
  666. #define COMBA_START
  667. #define CLEAR_CARRY \
  668. c0 = c1 = c2 = 0;
  669. #define COMBA_STORE(x) \
  670. x = c0;
  671. #define COMBA_STORE2(x) \
  672. x = c1;
  673. #define CARRY_FORWARD \
  674. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  675. #define COMBA_FINI
  676. #define SQRADD(i, j) \
  677. __asm__( \
  678. "movq %6,%%rax \n\t" \
  679. "mulq %%rax \n\t" \
  680. "addq %%rax,%0 \n\t" \
  681. "adcq %%rdx,%1 \n\t" \
  682. "adcq $0,%2 \n\t" \
  683. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "x"(i) :"%rax","%rdx","cc");
  684. #define SQRADD2(i, j) \
  685. __asm__( \
  686. "movq %6,%%rax \n\t" \
  687. "mulq %7 \n\t" \
  688. "addq %%rax,%0 \n\t" \
  689. "adcq %%rdx,%1 \n\t" \
  690. "adcq $0,%2 \n\t" \
  691. "addq %%rax,%0 \n\t" \
  692. "adcq %%rdx,%1 \n\t" \
  693. "adcq $0,%2 \n\t" \
  694. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
  695. #define SQRADDSC(i, j) \
  696. __asm__( \
  697. "movq %3,%%rax \n\t" \
  698. "mulq %4 \n\t" \
  699. "movq %%rax,%0 \n\t" \
  700. "movq %%rdx,%1 \n\t" \
  701. "xorq %2,%2 \n\t" \
  702. :"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%rax","%rdx","cc");
  703. #define SQRADDAC(i, j) \
  704. __asm__( \
  705. "movq %6,%%rax \n\t" \
  706. "mulq %7 \n\t" \
  707. "addq %%rax,%0 \n\t" \
  708. "adcq %%rdx,%1 \n\t" \
  709. "adcq $0,%2 \n\t" \
  710. :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
  711. #define SQRADDDB \
  712. __asm__( \
  713. "addq %6,%0 \n\t" \
  714. "adcq %7,%1 \n\t" \
  715. "adcq %8,%2 \n\t" \
  716. "addq %6,%0 \n\t" \
  717. "adcq %7,%1 \n\t" \
  718. "adcq %8,%2 \n\t" \
  719. :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
  720. #elif defined(TFM_SSE2)
  721. /* SSE2 Optimized */
  722. #define COMBA_START
  723. #define CLEAR_CARRY \
  724. c0 = c1 = c2 = 0;
  725. #define COMBA_STORE(x) \
  726. x = c0;
  727. #define COMBA_STORE2(x) \
  728. x = c1;
  729. #define CARRY_FORWARD \
  730. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  731. #define COMBA_FINI \
  732. __asm__("emms");
  733. #define SQRADD(i, j) \
  734. __asm__( \
  735. "movd %6,%%mm0 \n\t" \
  736. "pmuludq %%mm0,%%mm0\n\t" \
  737. "movd %%mm0,%%eax \n\t" \
  738. "psrlq $32,%%mm0 \n\t" \
  739. "addl %%eax,%0 \n\t" \
  740. "movd %%mm0,%%eax \n\t" \
  741. "adcl %%eax,%1 \n\t" \
  742. "adcl $0,%2 \n\t" \
  743. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc");
  744. #define SQRADD2(i, j) \
  745. __asm__( \
  746. "movd %6,%%mm0 \n\t" \
  747. "movd %7,%%mm1 \n\t" \
  748. "pmuludq %%mm1,%%mm0\n\t" \
  749. "movd %%mm0,%%eax \n\t" \
  750. "psrlq $32,%%mm0 \n\t" \
  751. "movd %%mm0,%%edx \n\t" \
  752. "addl %%eax,%0 \n\t" \
  753. "adcl %%edx,%1 \n\t" \
  754. "adcl $0,%2 \n\t" \
  755. "addl %%eax,%0 \n\t" \
  756. "adcl %%edx,%1 \n\t" \
  757. "adcl $0,%2 \n\t" \
  758. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
  759. #define SQRADDSC(i, j) \
  760. __asm__( \
  761. "movd %3,%%mm0 \n\t" \
  762. "movd %4,%%mm1 \n\t" \
  763. "pmuludq %%mm1,%%mm0\n\t" \
  764. "movd %%mm0,%0 \n\t" \
  765. "psrlq $32,%%mm0 \n\t" \
  766. "movd %%mm0,%1 \n\t" \
  767. "xorl %2,%2 \n\t" \
  768. :"=r"(sc0), "=r"(sc1), "=r"(sc2): "m"(i), "m"(j));
  769. /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
  770. #define SQRADDAC(i, j) \
  771. __asm__( \
  772. "movd %6,%%mm0 \n\t" \
  773. "movd %7,%%mm1 \n\t" \
  774. "pmuludq %%mm1,%%mm0\n\t" \
  775. "movd %%mm0,%%eax \n\t" \
  776. "psrlq $32,%%mm0 \n\t" \
  777. "movd %%mm0,%%edx \n\t" \
  778. "addl %%eax,%0 \n\t" \
  779. "adcl %%edx,%1 \n\t" \
  780. "adcl $0,%2 \n\t" \
  781. :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc");
  782. #define SQRADDDB \
  783. __asm__( \
  784. "addl %6,%0 \n\t" \
  785. "adcl %7,%1 \n\t" \
  786. "adcl %8,%2 \n\t" \
  787. "addl %6,%0 \n\t" \
  788. "adcl %7,%1 \n\t" \
  789. "adcl %8,%2 \n\t" \
  790. :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
  791. #elif defined(TFM_ARM)
  792. /* ARM code */
  793. #define COMBA_START
  794. #define CLEAR_CARRY \
  795. c0 = c1 = c2 = 0;
  796. #define COMBA_STORE(x) \
  797. x = c0;
  798. #define COMBA_STORE2(x) \
  799. x = c1;
  800. #define CARRY_FORWARD \
  801. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  802. #define COMBA_FINI
  803. /* multiplies point i and j, updates carry "c1" and digit c2 */
  804. #define SQRADD(i, j) \
  805. __asm__( \
  806. " UMULL r0,r1,%6,%6 \n\t" \
  807. " ADDS %0,%0,r0 \n\t" \
  808. " ADCS %1,%1,r1 \n\t" \
  809. " ADC %2,%2,#0 \n\t" \
  810. :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc");
  811. /* for squaring some of the terms are doubled... */
  812. #define SQRADD2(i, j) \
  813. __asm__( \
  814. " UMULL r0,r1,%6,%7 \n\t" \
  815. " ADDS %0,%0,r0 \n\t" \
  816. " ADCS %1,%1,r1 \n\t" \
  817. " ADC %2,%2,#0 \n\t" \
  818. " ADDS %0,%0,r0 \n\t" \
  819. " ADCS %1,%1,r1 \n\t" \
  820. " ADC %2,%2,#0 \n\t" \
  821. :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
  822. #define SQRADDSC(i, j) \
  823. __asm__( \
  824. " UMULL %0,%1,%3,%4 \n\t" \
  825. " SUB %2,%2,%2 \n\t" \
  826. :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "r"(i), "r"(j) : "cc");
  827. /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
  828. #define SQRADDAC(i, j) \
  829. __asm__( \
  830. " UMULL r0,r1,%6,%7 \n\t" \
  831. " ADDS %0,%0,r0 \n\t" \
  832. " ADCS %1,%1,r1 \n\t" \
  833. " ADC %2,%2,#0 \n\t" \
  834. :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
  835. #define SQRADDDB \
  836. __asm__( \
  837. " ADDS %0,%0,%3 \n\t" \
  838. " ADCS %1,%1,%4 \n\t" \
  839. " ADC %2,%2,%5 \n\t" \
  840. " ADDS %0,%0,%3 \n\t" \
  841. " ADCS %1,%1,%4 \n\t" \
  842. " ADC %2,%2,%5 \n\t" \
  843. :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
  844. #elif defined(TFM_PPC32)
  845. /* PPC32 */
  846. #define COMBA_START
  847. #define CLEAR_CARRY \
  848. c0 = c1 = c2 = 0;
  849. #define COMBA_STORE(x) \
  850. x = c0;
  851. #define COMBA_STORE2(x) \
  852. x = c1;
  853. #define CARRY_FORWARD \
  854. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  855. #define COMBA_FINI
  856. /* multiplies point i and j, updates carry "c1" and digit c2 */
  857. #define SQRADD(i, j) \
  858. __asm__( \
  859. " mullw 16,%6,%6 \n\t" \
  860. " addc %0,%0,16 \n\t" \
  861. " mulhwu 16,%6,%6 \n\t" \
  862. " adde %1,%1,16 \n\t" \
  863. " addze %2,%2 \n\t" \
  864. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
  865. /* for squaring some of the terms are doubled... */
  866. #define SQRADD2(i, j) \
  867. __asm__( \
  868. " mullw 16,%6,%7 \n\t" \
  869. " mulhwu 17,%6,%7 \n\t" \
  870. " addc %0,%0,16 \n\t" \
  871. " adde %1,%1,17 \n\t" \
  872. " addze %2,%2 \n\t" \
  873. " addc %0,%0,16 \n\t" \
  874. " adde %1,%1,17 \n\t" \
  875. " addze %2,%2 \n\t" \
  876. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
  877. #define SQRADDSC(i, j) \
  878. __asm__( \
  879. " mullw %0,%6,%7 \n\t" \
  880. " mulhwu %1,%6,%7 \n\t" \
  881. " xor %2,%2,%2 \n\t" \
  882. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
  883. #define SQRADDAC(i, j) \
  884. __asm__( \
  885. " mullw 16,%6,%7 \n\t" \
  886. " addc %0,%0,16 \n\t" \
  887. " mulhwu 16,%6,%7 \n\t" \
  888. " adde %1,%1,16 \n\t" \
  889. " addze %2,%2 \n\t" \
  890. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
  891. #define SQRADDDB \
  892. __asm__( \
  893. " addc %0,%0,%3 \n\t" \
  894. " adde %1,%1,%4 \n\t" \
  895. " adde %2,%2,%5 \n\t" \
  896. " addc %0,%0,%3 \n\t" \
  897. " adde %1,%1,%4 \n\t" \
  898. " adde %2,%2,%5 \n\t" \
  899. :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
  900. #elif defined(TFM_PPC64)
  901. /* PPC64 */
  902. #define COMBA_START
  903. #define CLEAR_CARRY \
  904. c0 = c1 = c2 = 0;
  905. #define COMBA_STORE(x) \
  906. x = c0;
  907. #define COMBA_STORE2(x) \
  908. x = c1;
  909. #define CARRY_FORWARD \
  910. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  911. #define COMBA_FINI
  912. /* multiplies point i and j, updates carry "c1" and digit c2 */
  913. #define SQRADD(i, j) \
  914. __asm__( \
  915. " mulld r16,%6,%6 \n\t" \
  916. " addc %0,%0,r16 \n\t" \
  917. " mulhdu r16,%6,%6 \n\t" \
  918. " adde %1,%1,r16 \n\t" \
  919. " addze %2,%2 \n\t" \
  920. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc");
  921. /* for squaring some of the terms are doubled... */
  922. #define SQRADD2(i, j) \
  923. __asm__( \
  924. " mulld r16,%6,%7 \n\t" \
  925. " mulhdu r17,%6,%7 \n\t" \
  926. " addc %0,%0,r16 \n\t" \
  927. " adde %1,%1,r17 \n\t" \
  928. " addze %2,%2 \n\t" \
  929. " addc %0,%0,r16 \n\t" \
  930. " adde %1,%1,r17 \n\t" \
  931. " addze %2,%2 \n\t" \
  932. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc");
  933. #define SQRADDSC(i, j) \
  934. __asm__( \
  935. " mulld %0,%6,%7 \n\t" \
  936. " mulhdu %1,%6,%7 \n\t" \
  937. " xor %2,%2,%2 \n\t" \
  938. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
  939. #define SQRADDAC(i, j) \
  940. __asm__( \
  941. " mulld r16,%6,%7 \n\t" \
  942. " addc %0,%0,r16 \n\t" \
  943. " mulhdu r16,%6,%7 \n\t" \
  944. " adde %1,%1,r16 \n\t" \
  945. " addze %2,%2 \n\t" \
  946. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc");
  947. #define SQRADDDB \
  948. __asm__( \
  949. " addc %0,%0,%3 \n\t" \
  950. " adde %1,%1,%4 \n\t" \
  951. " adde %2,%2,%5 \n\t" \
  952. " addc %0,%0,%3 \n\t" \
  953. " adde %1,%1,%4 \n\t" \
  954. " adde %2,%2,%5 \n\t" \
  955. :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
  956. #elif defined(TFM_AVR32)
  957. /* AVR32 */
  958. #define COMBA_START
  959. #define CLEAR_CARRY \
  960. c0 = c1 = c2 = 0;
  961. #define COMBA_STORE(x) \
  962. x = c0;
  963. #define COMBA_STORE2(x) \
  964. x = c1;
  965. #define CARRY_FORWARD \
  966. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  967. #define COMBA_FINI
  968. /* multiplies point i and j, updates carry "c1" and digit c2 */
  969. #define SQRADD(i, j) \
  970. __asm__( \
  971. " mulu.d r2,%6,%6 \n\t" \
  972. " add %0,%0,r2 \n\t" \
  973. " adc %1,%1,r3 \n\t" \
  974. " acr %2 \n\t" \
  975. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r2","r3");
  976. /* for squaring some of the terms are doubled... */
  977. #define SQRADD2(i, j) \
  978. __asm__( \
  979. " mulu.d r2,%6,%7 \n\t" \
  980. " add %0,%0,r2 \n\t" \
  981. " adc %1,%1,r3 \n\t" \
  982. " acr %2, \n\t" \
  983. " add %0,%0,r2 \n\t" \
  984. " adc %1,%1,r3 \n\t" \
  985. " acr %2, \n\t" \
  986. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2", "r3");
  987. #define SQRADDSC(i, j) \
  988. __asm__( \
  989. " mulu.d r2,%6,%7 \n\t" \
  990. " mov %0,r2 \n\t" \
  991. " mov %1,r3 \n\t" \
  992. " eor %2,%2 \n\t" \
  993. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "r2", "r3");
  994. #define SQRADDAC(i, j) \
  995. __asm__( \
  996. " mulu.d r2,%6,%7 \n\t" \
  997. " add %0,%0,r2 \n\t" \
  998. " adc %1,%1,r3 \n\t" \
  999. " acr %2 \n\t" \
  1000. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r2", "r3");
  1001. #define SQRADDDB \
  1002. __asm__( \
  1003. " add %0,%0,%3 \n\t" \
  1004. " adc %1,%1,%4 \n\t" \
  1005. " adc %2,%2,%5 \n\t" \
  1006. " add %0,%0,%3 \n\t" \
  1007. " adc %1,%1,%4 \n\t" \
  1008. " adc %2,%2,%5 \n\t" \
  1009. :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
  1010. #elif defined(TFM_MIPS)
  1011. /* MIPS */
  1012. #define COMBA_START
  1013. #define CLEAR_CARRY \
  1014. c0 = c1 = c2 = 0;
  1015. #define COMBA_STORE(x) \
  1016. x = c0;
  1017. #define COMBA_STORE2(x) \
  1018. x = c1;
  1019. #define CARRY_FORWARD \
  1020. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1021. #define COMBA_FINI
  1022. /* multiplies point i and j, updates carry "c1" and digit c2 */
  1023. #define SQRADD(i, j) \
  1024. __asm__( \
  1025. " multu %6,%6 \n\t" \
  1026. " mflo $12 \n\t" \
  1027. " mfhi $13 \n\t" \
  1028. " addu %0,%0,$12 \n\t" \
  1029. " sltu $12,%0,$12 \n\t" \
  1030. " addu %1,%1,$13 \n\t" \
  1031. " sltu $13,%1,$13 \n\t" \
  1032. " addu %1,%1,$12 \n\t" \
  1033. " sltu $12,%1,$12 \n\t" \
  1034. " addu %2,%2,$13 \n\t" \
  1035. " addu %2,%2,$12 \n\t" \
  1036. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"$12","$13");
  1037. /* for squaring some of the terms are doubled... */
  1038. #define SQRADD2(i, j) \
  1039. __asm__( \
  1040. " multu %6,%7 \n\t" \
  1041. " mflo $12 \n\t" \
  1042. " mfhi $13 \n\t" \
  1043. \
  1044. " addu %0,%0,$12 \n\t" \
  1045. " sltu $14,%0,$12 \n\t" \
  1046. " addu %1,%1,$13 \n\t" \
  1047. " sltu $15,%1,$13 \n\t" \
  1048. " addu %1,%1,$14 \n\t" \
  1049. " sltu $14,%1,$14 \n\t" \
  1050. " addu %2,%2,$15 \n\t" \
  1051. " addu %2,%2,$14 \n\t" \
  1052. \
  1053. " addu %0,%0,$12 \n\t" \
  1054. " sltu $14,%0,$12 \n\t" \
  1055. " addu %1,%1,$13 \n\t" \
  1056. " sltu $15,%1,$13 \n\t" \
  1057. " addu %1,%1,$14 \n\t" \
  1058. " sltu $14,%1,$14 \n\t" \
  1059. " addu %2,%2,$15 \n\t" \
  1060. " addu %2,%2,$14 \n\t" \
  1061. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12", "$13", "$14", "$15");
  1062. #define SQRADDSC(i, j) \
  1063. __asm__( \
  1064. " multu %6,%7 \n\t" \
  1065. " mflo %0 \n\t" \
  1066. " mfhi %1 \n\t" \
  1067. " xor %2,%2,%2 \n\t" \
  1068. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
  1069. #define SQRADDAC(i, j) \
  1070. __asm__( \
  1071. " multu %6,%7 \n\t" \
  1072. " mflo $12 \n\t" \
  1073. " mfhi $13 \n\t" \
  1074. " addu %0,%0,$12 \n\t" \
  1075. " sltu $12,%0,$12 \n\t" \
  1076. " addu %1,%1,$13 \n\t" \
  1077. " sltu $13,%1,$13 \n\t" \
  1078. " addu %1,%1,$12 \n\t" \
  1079. " sltu $12,%1,$12 \n\t" \
  1080. " addu %2,%2,$13 \n\t" \
  1081. " addu %2,%2,$12 \n\t" \
  1082. :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"$12", "$13", "$14");
  1083. #define SQRADDDB \
  1084. __asm__( \
  1085. " addu %0,%0,%3 \n\t" \
  1086. " sltu $10,%0,%3 \n\t" \
  1087. " addu %1,%1,$10 \n\t" \
  1088. " sltu $10,%1,$10 \n\t" \
  1089. " addu %1,%1,%4 \n\t" \
  1090. " sltu $11,%1,%4 \n\t" \
  1091. " addu %2,%2,$10 \n\t" \
  1092. " addu %2,%2,$11 \n\t" \
  1093. " addu %2,%2,%5 \n\t" \
  1094. \
  1095. " addu %0,%0,%3 \n\t" \
  1096. " sltu $10,%0,%3 \n\t" \
  1097. " addu %1,%1,$10 \n\t" \
  1098. " sltu $10,%1,$10 \n\t" \
  1099. " addu %1,%1,%4 \n\t" \
  1100. " sltu $11,%1,%4 \n\t" \
  1101. " addu %2,%2,$10 \n\t" \
  1102. " addu %2,%2,$11 \n\t" \
  1103. " addu %2,%2,%5 \n\t" \
  1104. :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "$10", "$11");
  1105. #else
  1106. #define TFM_ISO
  1107. /* ISO C portable code */
  1108. #define COMBA_START
  1109. #define CLEAR_CARRY \
  1110. c0 = c1 = c2 = 0;
  1111. #define COMBA_STORE(x) \
  1112. x = c0;
  1113. #define COMBA_STORE2(x) \
  1114. x = c1;
  1115. #define CARRY_FORWARD \
  1116. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1117. #define COMBA_FINI
  1118. /* multiplies point i and j, updates carry "c1" and digit c2 */
  1119. #define SQRADD(i, j) \
  1120. do { fp_word t; \
  1121. t = c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \
  1122. t = c1 + (t >> DIGIT_BIT); c1 = (fp_digit)t; \
  1123. c2 +=(fp_digit) (t >> DIGIT_BIT); \
  1124. } while (0);
  1125. /* for squaring some of the terms are doubled... */
  1126. #define SQRADD2(i, j) \
  1127. do { fp_word t; \
  1128. t = ((fp_word)i) * ((fp_word)j); \
  1129. tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
  1130. tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
  1131. c2 +=(fp_digit)(tt >> DIGIT_BIT); \
  1132. tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
  1133. tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
  1134. c2 +=(fp_digit)(tt >> DIGIT_BIT); \
  1135. } while (0);
  1136. #define SQRADDSC(i, j) \
  1137. do { fp_word t; \
  1138. t = ((fp_word)i) * ((fp_word)j); \
  1139. sc0 = (fp_digit)t; sc1 = (t >> DIGIT_BIT); sc2 = 0; \
  1140. } while (0);
  1141. #define SQRADDAC(i, j) \
  1142. do { fp_word t; \
  1143. t = sc0 + ((fp_word)i) * ((fp_word)j); sc0 = (fp_digit)t; \
  1144. t = sc1 + (t >> DIGIT_BIT); sc1 = (fp_digit)t; \
  1145. sc2 += (fp_digit)(t >> DIGIT_BIT); \
  1146. } while (0);
  1147. #define SQRADDDB \
  1148. do { fp_word t; \
  1149. t = ((fp_word)sc0) + ((fp_word)sc0) + c0; c0 = (fp_digit)t; \
  1150. t = ((fp_word)sc1) + ((fp_word)sc1) + c1 + (t >> DIGIT_BIT); \
  1151. c1 = (fp_digit)t; \
  1152. c2 = c2 + (fp_digit)(((fp_word)sc2) + ((fp_word)sc2) + (t >> DIGIT_BIT)); \
  1153. } while (0);
  1154. #endif
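/* Usage sketch (illustrative): the fp_sqr_comba_N() routines in the .i files
 * included below expand column by column with the macros above; larger sizes
 * also use SQRADDSC/SQRADDAC/SQRADDDB to accumulate a run of cross terms once
 * and then double it. For a 2-digit input a (4-digit result b) the pattern is
 * roughly:
 *
 *   fp_digit c0, c1, c2;
 *   COMBA_START;
 *   CLEAR_CARRY;
 *   SQRADD(a->dp[0], a->dp[0]);   COMBA_STORE(b->dp[0]);    (column 0)
 *   CARRY_FORWARD;
 *   SQRADD2(a->dp[0], a->dp[1]);  COMBA_STORE(b->dp[1]);    (column 1, cross term doubled)
 *   CARRY_FORWARD;
 *   SQRADD(a->dp[1], a->dp[1]);   COMBA_STORE(b->dp[2]);    (column 2)
 *   COMBA_STORE2(b->dp[3]);                                 (top carry)
 *   COMBA_FINI;
 */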
  1155. #ifdef TFM_SMALL_SET
  1156. #include "fp_sqr_comba_small_set.i"
  1157. #endif
  1158. #if defined(TFM_SQR3) && FP_SIZE >= 6
  1159. #include "fp_sqr_comba_3.i"
  1160. #endif
  1161. #if defined(TFM_SQR4) && FP_SIZE >= 8
  1162. #include "fp_sqr_comba_4.i"
  1163. #endif
  1164. #if defined(TFM_SQR6) && FP_SIZE >= 12
  1165. #include "fp_sqr_comba_6.i"
  1166. #endif
  1167. #if defined(TFM_SQR7) && FP_SIZE >= 14
  1168. #include "fp_sqr_comba_7.i"
  1169. #endif
  1170. #if defined(TFM_SQR8) && FP_SIZE >= 16
  1171. #include "fp_sqr_comba_8.i"
  1172. #endif
  1173. #if defined(TFM_SQR9) && FP_SIZE >= 18
  1174. #include "fp_sqr_comba_9.i"
  1175. #endif
  1176. #if defined(TFM_SQR12) && FP_SIZE >= 24
  1177. #include "fp_sqr_comba_12.i"
  1178. #endif
  1179. #if defined(TFM_SQR17) && FP_SIZE >= 34
  1180. #include "fp_sqr_comba_17.i"
  1181. #endif
  1182. #if defined(TFM_SQR20) && FP_SIZE >= 40
  1183. #include "fp_sqr_comba_20.i"
  1184. #endif
  1185. #if defined(TFM_SQR24) && FP_SIZE >= 48
  1186. #include "fp_sqr_comba_24.i"
  1187. #endif
  1188. #if defined(TFM_SQR28) && FP_SIZE >= 56
  1189. #include "fp_sqr_comba_28.i"
  1190. #endif
  1191. #if defined(TFM_SQR32) && FP_SIZE >= 64
  1192. #include "fp_sqr_comba_32.i"
  1193. #endif
  1194. #if defined(TFM_SQR48) && FP_SIZE >= 96
  1195. #include "fp_sqr_comba_48.i"
  1196. #endif
  1197. #if defined(TFM_SQR64) && FP_SIZE >= 128
  1198. #include "fp_sqr_comba_64.i"
  1199. #endif
  1200. /* end fp_sqr_comba.c asm */
  1201. /* start fp_mul_comba.c asm */
  1202. /* these are the combas. Worship them. */
  1203. #if defined(TFM_X86)
  1204. /* Generic x86 optimized code */
  1205. /* anything you need at the start */
  1206. #define COMBA_START
  1207. /* clear the chaining variables */
  1208. #define COMBA_CLEAR \
  1209. c0 = c1 = c2 = 0;
  1210. /* forward the carry to the next digit */
  1211. #define COMBA_FORWARD \
  1212. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1213. /* store the first sum */
  1214. #define COMBA_STORE(x) \
  1215. x = c0;
  1216. /* store the second sum [carry] */
  1217. #define COMBA_STORE2(x) \
  1218. x = c1;
  1219. /* anything you need at the end */
  1220. #define COMBA_FINI
  1221. /* this should multiply i and j */
  1222. #define MULADD(i, j) \
  1223. __asm__( \
  1224. "movl %6,%%eax \n\t" \
  1225. "mull %7 \n\t" \
  1226. "addl %%eax,%0 \n\t" \
  1227. "adcl %%edx,%1 \n\t" \
  1228. "adcl $0,%2 \n\t" \
  1229. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
  1230. #elif defined(TFM_X86_64)
  1231. /* x86-64 optimized */
  1232. /* anything you need at the start */
  1233. #define COMBA_START
  1234. /* clear the chaining variables */
  1235. #define COMBA_CLEAR \
  1236. c0 = c1 = c2 = 0;
  1237. /* forward the carry to the next digit */
  1238. #define COMBA_FORWARD \
  1239. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1240. /* store the first sum */
  1241. #define COMBA_STORE(x) \
  1242. x = c0;
  1243. /* store the second sum [carry] */
  1244. #define COMBA_STORE2(x) \
  1245. x = c1;
  1246. /* anything you need at the end */
  1247. #define COMBA_FINI
  1248. /* this should multiply i and j */
  1249. #define MULADD(i, j) \
  1250. __asm__ ( \
  1251. "movq %6,%%rax \n\t" \
  1252. "mulq %7 \n\t" \
  1253. "addq %%rax,%0 \n\t" \
  1254. "adcq %%rdx,%1 \n\t" \
  1255. "adcq $0,%2 \n\t" \
  1256. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
  1257. #if defined(HAVE_INTEL_MULX)
  1258. #define MULADD_BODY(a,b,carry,c) \
  1259. __asm__ volatile( \
  1260. "movq %[a0],%%rdx\n\t" \
  1261. "xorq %%rcx, %%rcx\n\t" \
  1262. "movq 0(%[cp]),%%r8\n\t" \
  1263. "movq 8(%[cp]),%%r9\n\t" \
  1264. "movq 16(%[cp]),%%r10\n\t" \
  1265. "movq 24(%[cp]),%%r11\n\t" \
  1266. \
  1267. "mulx (%[bp]),%%rax, %%rbx\n\t" \
  1268. "adcxq %[ca], %%r8\n\t" \
  1269. "adoxq %%rax, %%r8\n\t" \
  1270. "mulx 8(%[bp]),%%rax, %%rcx\n\t" \
  1271. "adcxq %%rbx, %%r9\n\t" \
  1272. "adoxq %%rax, %%r9\n\t" \
  1273. "mulx 16(%[bp]),%%rax, %%rbx\n\t" \
  1274. "adcxq %%rcx, %%r10\n\t" \
  1275. "adoxq %%rax, %%r10\n\t" \
  1276. "mulx 24(%[bp]),%%rax, %%rcx\n\t" \
  1277. "adcxq %%rbx, %%r11\n\t" \
  1278. "mov $0, %[ca]\n\t" \
  1279. "adoxq %%rax, %%r11\n\t" \
  1280. "adcxq %%rcx, %[ca]\n\t" \
  1281. "mov $0, %%rdx\n\t" \
  1282. "adoxq %%rdx, %[ca]\n\t" \
  1283. \
  1284. "movq %%r8, 0(%[cp])\n\t" \
  1285. "movq %%r9, 8(%[cp])\n\t" \
  1286. "movq %%r10, 16(%[cp])\n\t" \
  1287. "movq %%r11, 24(%[cp])\n\t" \
  1288. : [ca] "+r" (carry) \
  1289. : [a0] "r" (a->dp[ix]), [bp] "r" (&(b->dp[iy])), \
  1290. [cp] "r" (&(c->dp[iz])) \
  1291. : "%r8", "%r9", "%r10", "%r11", \
  1292. "%rdx", "%rax", "%rcx", "%rbx" \
  1293. )
  1294. #define TFM_INTEL_MUL_COMBA(a, b, ca, c) \
  1295. for (iz=0; iz<pa; iz++) c->dp[iz] = 0; \
  1296. for (ix=0; ix<a->used; ix++) { \
  1297. ca = 0; \
  1298. for (iy=0; iy<b->used; iy+=4) { \
  1299. iz = ix + iy; \
  1300. MULADD_BODY(a, b, ca, c); \
  1301. } \
  1302. c->dp[ix + iy] = ca; \
  1303. }
  1304. #endif
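/* Usage sketch (illustrative): TFM_INTEL_MUL_COMBA expects ix, iy, iz, pa and
 * the carry word to be declared by the enclosing function, e.g. (the wrapper
 * name and the clamping step are assumed here for illustration):
 *
 *   static void fp_mul_comba_mulx(fp_int *a, fp_int *b, fp_int *c)
 *   {
 *       int ix, iy, iz;
 *       int pa = a->used + b->used;
 *       fp_digit ca;
 *       TFM_INTEL_MUL_COMBA(a, b, ca, c);
 *       c->used = pa;
 *       fp_clamp(c);
 *   }
 */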
  1305. #elif defined(TFM_SSE2)
  1306. /* use SSE2 optimizations */
  1307. /* anything you need at the start */
  1308. #define COMBA_START
  1309. /* clear the chaining variables */
  1310. #define COMBA_CLEAR \
  1311. c0 = c1 = c2 = 0;
  1312. /* forward the carry to the next digit */
  1313. #define COMBA_FORWARD \
  1314. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1315. /* store the first sum */
  1316. #define COMBA_STORE(x) \
  1317. x = c0;
  1318. /* store the second sum [carry] */
  1319. #define COMBA_STORE2(x) \
  1320. x = c1;
  1321. /* anything you need at the end */
  1322. #define COMBA_FINI \
  1323. __asm__("emms");
  1324. /* this should multiply i and j */
  1325. #define MULADD(i, j) \
  1326. __asm__( \
  1327. "movd %6,%%mm0 \n\t" \
  1328. "movd %7,%%mm1 \n\t" \
  1329. "pmuludq %%mm1,%%mm0\n\t" \
  1330. "movd %%mm0,%%eax \n\t" \
  1331. "psrlq $32,%%mm0 \n\t" \
  1332. "addl %%eax,%0 \n\t" \
  1333. "movd %%mm0,%%eax \n\t" \
  1334. "adcl %%eax,%1 \n\t" \
  1335. "adcl $0,%2 \n\t" \
  1336. :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","cc");
  1337. #elif defined(TFM_ARM)
  1338. /* ARM code */
  1339. #define COMBA_START
  1340. #define COMBA_CLEAR \
  1341. c0 = c1 = c2 = 0;
  1342. #define COMBA_FORWARD \
  1343. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1344. #define COMBA_STORE(x) \
  1345. x = c0;
  1346. #define COMBA_STORE2(x) \
  1347. x = c1;
  1348. #define COMBA_FINI
  1349. #define MULADD(i, j) \
  1350. __asm__( \
  1351. " UMULL r0,r1,%6,%7 \n\t" \
  1352. " ADDS %0,%0,r0 \n\t" \
  1353. " ADCS %1,%1,r1 \n\t" \
  1354. " ADC %2,%2,#0 \n\t" \
  1355. :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
  1356. #elif defined(TFM_PPC32)
  1357. /* For 32-bit PPC */
  1358. #define COMBA_START
  1359. #define COMBA_CLEAR \
  1360. c0 = c1 = c2 = 0;
  1361. #define COMBA_FORWARD \
  1362. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1363. #define COMBA_STORE(x) \
  1364. x = c0;
  1365. #define COMBA_STORE2(x) \
  1366. x = c1;
  1367. #define COMBA_FINI
  1368. /* untested: will mulhwu change the flags? Docs say no */
  1369. #define MULADD(i, j) \
  1370. __asm__( \
  1371. " mullw 16,%6,%7 \n\t" \
  1372. " addc %0,%0,16 \n\t" \
  1373. " mulhwu 16,%6,%7 \n\t" \
  1374. " adde %1,%1,16 \n\t" \
  1375. " addze %2,%2 \n\t" \
  1376. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16");
  1377. #elif defined(TFM_PPC64)
  1378. /* For 64-bit PPC */
  1379. #define COMBA_START
  1380. #define COMBA_CLEAR \
  1381. c0 = c1 = c2 = 0;
  1382. #define COMBA_FORWARD \
  1383. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1384. #define COMBA_STORE(x) \
  1385. x = c0;
  1386. #define COMBA_STORE2(x) \
  1387. x = c1;
  1388. #define COMBA_FINI
  1389. /* untested: will mulhdu change the flags? Docs say no */
  1390. #define MULADD(i, j) \
  1391. __asm__( \
  1392. " mulld r16,%6,%7 \n\t" \
  1393. " addc %0,%0,16 \n\t" \
  1394. " mulhdu r16,%6,%7 \n\t" \
  1395. " adde %1,%1,16 \n\t" \
  1396. " addze %2,%2 \n\t" \
  1397. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16");
  1398. #elif defined(TFM_AVR32)
  1399. /* AVR32 */
  1400. #define COMBA_START
  1401. #define COMBA_CLEAR \
  1402. c0 = c1 = c2 = 0;
  1403. #define COMBA_FORWARD \
  1404. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1405. #define COMBA_STORE(x) \
  1406. x = c0;
  1407. #define COMBA_STORE2(x) \
  1408. x = c1;
  1409. #define COMBA_FINI
  1410. #define MULADD(i, j) \
  1411. __asm__( \
  1412. " mulu.d r2,%6,%7 \n\t"\
  1413. " add %0,r2 \n\t"\
  1414. " adc %1,%1,r3 \n\t"\
  1415. " acr %2 \n\t"\
  1416. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2","r3");
  1417. #elif defined(TFM_MIPS)
  1418. /* MIPS */
  1419. #define COMBA_START
  1420. #define COMBA_CLEAR \
  1421. c0 = c1 = c2 = 0;
  1422. #define COMBA_FORWARD \
  1423. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1424. #define COMBA_STORE(x) \
  1425. x = c0;
  1426. #define COMBA_STORE2(x) \
  1427. x = c1;
  1428. #define COMBA_FINI
  1429. #define MULADD(i, j) \
  1430. __asm__( \
  1431. " multu %6,%7 \n\t" \
  1432. " mflo $12 \n\t" \
  1433. " mfhi $13 \n\t" \
  1434. " addu %0,%0,$12 \n\t" \
  1435. " sltu $12,%0,$12 \n\t" \
  1436. " addu %1,%1,$13 \n\t" \
  1437. " sltu $13,%1,$13 \n\t" \
  1438. " addu %1,%1,$12 \n\t" \
  1439. " sltu $12,%1,$12 \n\t" \
  1440. " addu %2,%2,$13 \n\t" \
  1441. " addu %2,%2,$12 \n\t" \
  1442. :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12","$13");
  1443. #else
  1444. /* ISO C code */
  1445. #define COMBA_START
  1446. #define COMBA_CLEAR \
  1447. c0 = c1 = c2 = 0;
  1448. #define COMBA_FORWARD \
  1449. do { c0 = c1; c1 = c2; c2 = 0; } while (0);
  1450. #define COMBA_STORE(x) \
  1451. x = c0;
  1452. #define COMBA_STORE2(x) \
  1453. x = c1;
  1454. #define COMBA_FINI
  1455. #define MULADD(i, j) \
  1456. do { fp_word t; \
  1457. t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); \
  1458. c0 = (fp_digit)t; \
  1459. t = (fp_word)c1 + (t >> DIGIT_BIT); \
  1460. c1 = (fp_digit)t; \
  1461. c2 += (fp_digit)(t >> DIGIT_BIT); \
  1462. } while (0);
  1463. #endif
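/* Usage sketch (illustrative): the fp_mul_comba_N() routines in the .i files
 * included below walk the output columns with MULADD. For 2-digit inputs a
 * and b (4-digit product c) the pattern is roughly:
 *
 *   fp_digit c0, c1, c2;
 *   COMBA_START;
 *   COMBA_CLEAR;
 *   MULADD(a->dp[0], b->dp[0]);                              COMBA_STORE(c->dp[0]);
 *   COMBA_FORWARD;
 *   MULADD(a->dp[0], b->dp[1]); MULADD(a->dp[1], b->dp[0]);  COMBA_STORE(c->dp[1]);
 *   COMBA_FORWARD;
 *   MULADD(a->dp[1], b->dp[1]);                              COMBA_STORE(c->dp[2]);
 *   COMBA_STORE2(c->dp[3]);
 *   COMBA_FINI;
 */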
  1464. #ifdef TFM_SMALL_SET
  1465. #include "fp_mul_comba_small_set.i"
  1466. #endif
  1467. #if defined(TFM_MUL3) && FP_SIZE >= 6
  1468. #include "fp_mul_comba_3.i"
  1469. #endif
  1470. #if defined(TFM_MUL4) && FP_SIZE >= 8
  1471. #include "fp_mul_comba_4.i"
  1472. #endif
  1473. #if defined(TFM_MUL6) && FP_SIZE >= 12
  1474. #include "fp_mul_comba_6.i"
  1475. #endif
  1476. #if defined(TFM_MUL7) && FP_SIZE >= 14
  1477. #include "fp_mul_comba_7.i"
  1478. #endif
  1479. #if defined(TFM_MUL8) && FP_SIZE >= 16
  1480. #include "fp_mul_comba_8.i"
  1481. #endif
  1482. #if defined(TFM_MUL9) && FP_SIZE >= 18
  1483. #include "fp_mul_comba_9.i"
  1484. #endif
  1485. #if defined(TFM_MUL12) && FP_SIZE >= 24
  1486. #include "fp_mul_comba_12.i"
  1487. #endif
  1488. #if defined(TFM_MUL17) && FP_SIZE >= 34
  1489. #include "fp_mul_comba_17.i"
  1490. #endif
  1491. #if defined(TFM_MUL20) && FP_SIZE >= 40
  1492. #include "fp_mul_comba_20.i"
  1493. #endif
  1494. #if defined(TFM_MUL24) && FP_SIZE >= 48
  1495. #include "fp_mul_comba_24.i"
  1496. #endif
  1497. #if defined(TFM_MUL28) && FP_SIZE >= 56
  1498. #include "fp_mul_comba_28.i"
  1499. #endif
  1500. #if defined(TFM_MUL32) && FP_SIZE >= 64
  1501. #include "fp_mul_comba_32.i"
  1502. #endif
  1503. #if defined(TFM_MUL48) && FP_SIZE >= 96
  1504. #include "fp_mul_comba_48.i"
  1505. #endif
  1506. #if defined(TFM_MUL64) && FP_SIZE >= 128
  1507. #include "fp_mul_comba_64.i"
  1508. #endif
  1509. /* end fp_mul_comba.c asm */