- /* asm.c
- *
- * Copyright (C) 2006-2023 wolfSSL Inc.
- *
- * This file is part of wolfSSL.
- *
- * wolfSSL is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * wolfSSL is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
- */
- #ifdef HAVE_CONFIG_H
- #include <config.h>
- #endif
- #include <wolfssl/wolfcrypt/settings.h>
- /*
- * Based on public domain TomsFastMath 0.10 by Tom St Denis, tomstdenis@iahu.ca,
- * http://math.libtomcrypt.com
- */
- /******************************************************************/
- /* fp_montgomery_reduce.c asm or generic */
- /* Each platform needs to query cpuid leaf 7 to see if BMI2 (MULX, RORX) and
- * ADX (ADCX, ADOX) are supported. Also, let's setup a macro for proper
- * linkage w/o ABI conflicts
- */
- #if defined(HAVE_INTEL_MULX)
- #ifndef _MSC_VER
- #define cpuid(reg, leaf, sub)\
- __asm__ __volatile__ ("cpuid":\
- "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
- "a" (leaf), "c"(sub));
- #else
- #include <intrin.h>
- #define cpuid(a,b,c) __cpuidex((int*)a,b,c)
- #endif /* _MSC_VER */
- #define EAX 0
- #define EBX 1
- #define ECX 2
- #define EDX 3
- #define CPUID_AVX1 0x1
- #define CPUID_AVX2 0x2
- #define CPUID_RDRAND 0x4
- #define CPUID_RDSEED 0x8
- #define CPUID_BMI2 0x10 /* MULX, RORX */
- #define CPUID_ADX 0x20 /* ADCX, ADOX */
- #define IS_INTEL_AVX1 (cpuid_flags&CPUID_AVX1)
- #define IS_INTEL_AVX2 (cpuid_flags&CPUID_AVX2)
- #define IS_INTEL_BMI2 (cpuid_flags&CPUID_BMI2)
- #define IS_INTEL_ADX (cpuid_flags&CPUID_ADX)
- #define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
- #define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
- #define SET_FLAGS
- static word32 cpuid_check = 0 ;
- static word32 cpuid_flags = 0 ;
- static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
- int got_intel_cpu = 0;
- int got_amd_cpu = 0;
- unsigned int reg[5];
- reg[4] = '\0' ;
- cpuid(reg, 0, 0);
- /* check for intel cpu */
- if( memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
- memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
- memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
- got_intel_cpu = 1;
- }
- /* check for AMD cpu */
- if( memcmp((char *)&(reg[EBX]), "Auth", 4) == 0 &&
- memcmp((char *)&(reg[EDX]), "enti", 4) == 0 &&
- memcmp((char *)&(reg[ECX]), "cAMD", 4) == 0) {
- got_amd_cpu = 1;
- }
- if (got_intel_cpu || got_amd_cpu) {
- cpuid(reg, leaf, sub);
- return((reg[num]>>bit)&0x1) ;
- }
- return 0 ;
- }
- WC_INLINE static int set_cpuid_flags(void) {
- if(cpuid_check == 0) {
- if(cpuid_flag(7, 0, EBX, 8)){ cpuid_flags |= CPUID_BMI2 ; }
- if(cpuid_flag(7, 0, EBX,19)){ cpuid_flags |= CPUID_ADX ; }
- cpuid_check = 1 ;
- return 0 ;
- }
- return 1 ;
- }
- #define RETURN return
- #define IF_HAVE_INTEL_MULX(func, ret) \
- if(cpuid_check==0)set_cpuid_flags() ; \
- if(IS_INTEL_BMI2 && IS_INTEL_ADX){ func; ret ; }
- #else
- #define IF_HAVE_INTEL_MULX(func, ret) WC_DO_NOTHING
- #endif
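- /* Usage sketch (illustrative, not part of this file): IF_HAVE_INTEL_MULX
- * lets a caller dispatch to a MULX/ADX routine at runtime and fall through
- * to the generic code otherwise. fp_mul_comba_mulx() is a hypothetical
- * stand-in for the accelerated routine:
- *
- *     int fp_mul_comba_wrap(fp_int *a, fp_int *b, fp_int *c)
- *     {
- *         IF_HAVE_INTEL_MULX(fp_mul_comba_mulx(a, b, c), return FP_OKAY);
- *         return fp_mul_comba(a, b, c);
- *     }
- */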
- #if defined(TFM_X86) && !defined(TFM_SSE2)
- /* x86-32 code */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- "movl %5,%%eax \n\t" \
- "mull %4 \n\t" \
- "addl %1,%%eax \n\t" \
- "adcl $0,%%edx \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl $0,%%edx \n\t" \
- "movl %%edx,%1 \n\t" \
- :"=g"(_c[LO]), "=r"(cy) \
- :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
- : "%eax", "%edx", "cc")
- #define PROPCARRY \
- __asm__( \
- "addl %1,%0 \n\t" \
- "setb %%al \n\t" \
- "movzbl %%al,%1 \n\t" \
- :"=g"(_c[LO]), "=r"(cy) \
- :"0"(_c[LO]), "1"(cy) \
- : "%eax", "cc")
- /******************************************************************/
- #elif defined(TFM_X86_64)
- /* x86-64 code */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- "movq %5,%%rax \n\t" \
- "mulq %4 \n\t" \
- "addq %1,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rdx,%1 \n\t" \
- :"=g"(_c[LO]), "=r"(cy) \
- :"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
- : "%rax", "%rdx", "cc")
- #if defined(HAVE_INTEL_MULX)
- #define MULX_INNERMUL8(x,y,z,cy) \
- __asm__ volatile ( \
- "movq %[yn], %%rdx\n\t" \
- "xorq %%rcx, %%rcx\n\t" \
- "movq 0(%[c]), %%r8\n\t" \
- "movq 8(%[c]), %%r9\n\t" \
- "movq 16(%[c]), %%r10\n\t" \
- "movq 24(%[c]), %%r11\n\t" \
- "movq 32(%[c]), %%r12\n\t" \
- "movq 40(%[c]), %%r13\n\t" \
- "movq 48(%[c]), %%r14\n\t" \
- "movq 56(%[c]), %%r15\n\t" \
- \
- "mulx 0(%[xp]), %%rax, %%rcx\n\t" \
- "adcxq %[cy], %%r8\n\t" \
- "adoxq %%rax, %%r8\n\t" \
- "mulx 8(%[xp]), %%rax, %[cy]\n\t" \
- "adcxq %%rcx, %%r9\n\t" \
- "adoxq %%rax, %%r9\n\t" \
- "mulx 16(%[xp]), %%rax, %%rcx\n\t" \
- "adcxq %[cy], %%r10\n\t" \
- "adoxq %%rax, %%r10\n\t" \
- "mulx 24(%[xp]), %%rax, %[cy]\n\t" \
- "adcxq %%rcx, %%r11\n\t" \
- "adoxq %%rax, %%r11\n\t" \
- "mulx 32(%[xp]), %%rax, %%rcx\n\t" \
- "adcxq %[cy], %%r12\n\t" \
- "adoxq %%rax, %%r12\n\t" \
- "mulx 40(%[xp]), %%rax, %[cy]\n\t" \
- "adcxq %%rcx, %%r13\n\t" \
- "adoxq %%rax, %%r13\n\t" \
- "mulx 48(%[xp]), %%rax, %%rcx\n\t" \
- "adcxq %[cy], %%r14\n\t" \
- "adoxq %%rax, %%r14\n\t" \
- "adcxq %%rcx, %%r15\n\t" \
- "mulx 56(%[xp]), %%rax, %[cy]\n\t" \
- "movq $0, %%rdx\n\t" \
- "adoxq %%rdx, %%rax\n\t" \
- "adcxq %%rdx, %[cy]\n\t" \
- "adoxq %%rdx, %[cy]\n\t" \
- "addq %%rax, %%r15\n\t" \
- "adcq $0, %[cy]\n\t" \
- \
- "movq %%r8, 0(%[c])\n\t" \
- "movq %%r9, 8(%[c])\n\t" \
- "movq %%r10, 16(%[c])\n\t" \
- "movq %%r11, 24(%[c])\n\t" \
- "movq %%r12, 32(%[c])\n\t" \
- "movq %%r13, 40(%[c])\n\t" \
- "movq %%r14, 48(%[c])\n\t" \
- "movq %%r15, 56(%[c])\n\t" \
- : [cy] "+r" (cy) \
- : [xp] "r" (x), [c] "r" (c_mulx), [yn] "rm" (y) \
- :"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", \
- "%rdx", "%rax", "%rcx" \
- )
- #define INNERMUL8_MULX \
- {\
- MULX_INNERMUL8(tmpm, mu, _c, cy);\
- }
- #endif
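- /* MULX_INNERMUL8 multiplies one digit (mu, loaded into rdx for mulx) by
- * eight digits of the modulus at once. It uses the BMI2/ADX dual carry
- * chains: adcxq propagates through CF and adoxq through OF, so the two
- * addition chains interleave without saving flags between steps. */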
- #define INNERMUL8 \
- __asm__( \
- "movq 0(%5),%%rax \n\t" \
- "movq 0(%2),%%r10 \n\t" \
- "movq 0x8(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x8(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x10(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x10(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x8(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x18(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x18(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x10(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x20(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x20(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x18(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x28(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x28(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x20(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x30(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x30(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x28(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "movq 0x38(%5),%%r11 \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq 0x38(%2),%%r10 \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x30(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- "movq %%r11,%%rax \n\t" \
- "mulq %4 \n\t" \
- "addq %%r10,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "addq %3,%%rax \n\t" \
- "adcq $0,%%rdx \n\t" \
- "movq %%rax,0x38(%0) \n\t" \
- "movq %%rdx,%1 \n\t" \
- \
- :"=r"(_c), "=r"(cy) \
- : "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
- : "%rax", "%rdx", "%r10", "%r11", "cc")
- #define PROPCARRY \
- __asm__( \
- "addq %1,%0 \n\t" \
- "setb %%al \n\t" \
- "movzbq %%al,%1 \n\t" \
- :"=g"(_c[LO]), "=r"(cy) \
- :"0"(_c[LO]), "1"(cy) \
- : "%rax", "cc")
- /******************************************************************/
- #elif defined(TFM_SSE2)
- /* SSE2 code (assumes 32-bit fp_digits) */
- /* MMX register assignments:
- * mm0 *tmpm++, then Mu * (*tmpm++)
- * mm1 c[x], then Mu
- * mm2 mp
- * mm3 cy
- * mm4 _c[LO]
- */
- #define MONT_START \
- __asm__("movd %0,%%mm2"::"g"(mp))
- #define MONT_FINI \
- __asm__("emms")
- #define LOOP_START \
- __asm__( \
- "movd %0,%%mm1 \n\t" \
- "pxor %%mm3,%%mm3 \n\t" \
- "pmuludq %%mm2,%%mm1 \n\t" \
- :: "g"(c[x]))
- /* pmuludq on mmx registers does a 32x32->64 multiply. */
- #define INNERMUL \
- __asm__( \
- "movd %1,%%mm4 \n\t" \
- "movd %2,%%mm0 \n\t" \
- "paddq %%mm4,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm0 \n\t" \
- "paddq %%mm0,%%mm3 \n\t" \
- "movd %%mm3,%0 \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- :"=g"(_c[LO]) : "0"(_c[LO]), "g"(*tmpm++) );
- #define INNERMUL8 \
- __asm__( \
- "movd 0(%1),%%mm4 \n\t" \
- "movd 0(%2),%%mm0 \n\t" \
- "paddq %%mm4,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm0 \n\t" \
- "movd 4(%2),%%mm5 \n\t" \
- "paddq %%mm0,%%mm3 \n\t" \
- "movd 4(%1),%%mm6 \n\t" \
- "movd %%mm3,0(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm6,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm5 \n\t" \
- "movd 8(%2),%%mm6 \n\t" \
- "paddq %%mm5,%%mm3 \n\t" \
- "movd 8(%1),%%mm7 \n\t" \
- "movd %%mm3,4(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm7,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm6 \n\t" \
- "movd 12(%2),%%mm7 \n\t" \
- "paddq %%mm6,%%mm3 \n\t" \
- "movd 12(%1),%%mm5 \n\t" \
- "movd %%mm3,8(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm5,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm7 \n\t" \
- "movd 16(%2),%%mm5 \n\t" \
- "paddq %%mm7,%%mm3 \n\t" \
- "movd 16(%1),%%mm6 \n\t" \
- "movd %%mm3,12(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm6,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm5 \n\t" \
- "movd 20(%2),%%mm6 \n\t" \
- "paddq %%mm5,%%mm3 \n\t" \
- "movd 20(%1),%%mm7 \n\t" \
- "movd %%mm3,16(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm7,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm6 \n\t" \
- "movd 24(%2),%%mm7 \n\t" \
- "paddq %%mm6,%%mm3 \n\t" \
- "movd 24(%1),%%mm5 \n\t" \
- "movd %%mm3,20(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm5,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm7 \n\t" \
- "movd 28(%2),%%mm5 \n\t" \
- "paddq %%mm7,%%mm3 \n\t" \
- "movd 28(%1),%%mm6 \n\t" \
- "movd %%mm3,24(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- \
- "paddq %%mm6,%%mm3 \n\t" \
- "pmuludq %%mm1,%%mm5 \n\t" \
- "paddq %%mm5,%%mm3 \n\t" \
- "movd %%mm3,28(%0) \n\t" \
- "psrlq $32, %%mm3 \n\t" \
- :"=r"(_c) : "0"(_c), "r"(tmpm) );
- /* TAO switched tmpm from "g" to "r" after gcc tried to index the indexed stack
- pointer */
- #define LOOP_END \
- __asm__( "movd %%mm3,%0 \n" :"=r"(cy))
- #define PROPCARRY \
- __asm__( \
- "addl %1,%0 \n\t" \
- "setb %%al \n\t" \
- "movzbl %%al,%1 \n\t" \
- :"=g"(_c[LO]), "=r"(cy) \
- :"0"(_c[LO]), "1"(cy) \
- : "%eax", "cc")
- /******************************************************************/
- #elif defined(TFM_ARM)
- /* ARMv4 code */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #ifdef __thumb__
- #define INNERMUL \
- __asm__( \
- " LDR r0,%1 \n\t" \
- " ADDS r0,r0,%0 \n\t" \
- " ITE CS \n\t" \
- " MOVCS %0,#1 \n\t" \
- " MOVCC %0,#0 \n\t" \
- " UMLAL r0,%0,%3,%4 \n\t" \
- " STR r0,%1 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0]):"r0","cc");
- #define PROPCARRY \
- __asm__( \
- " LDR r0,%1 \n\t" \
- " ADDS r0,r0,%0 \n\t" \
- " STR r0,%1 \n\t" \
- " ITE CS \n\t" \
- " MOVCS %0,#1 \n\t" \
- " MOVCC %0,#0 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"m"(_c[0]):"r0","cc");
- /* TAO thumb mode uses ite (if then else) to detect carry directly
- * fixed unmatched constraint warning by changing 1 to m */
- #else /* __thumb__ */
- #define INNERMUL \
- __asm__( \
- " LDR r0,%1 \n\t" \
- " ADDS r0,r0,%0 \n\t" \
- " MOVCS %0,#1 \n\t" \
- " MOVCC %0,#0 \n\t" \
- " UMLAL r0,%0,%3,%4 \n\t" \
- " STR r0,%1 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc");
- #define PROPCARRY \
- __asm__( \
- " LDR r0,%1 \n\t" \
- " ADDS r0,r0,%0 \n\t" \
- " STR r0,%1 \n\t" \
- " MOVCS %0,#1 \n\t" \
- " MOVCC %0,#0 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r0","cc");
- #endif /* __thumb__ */
- #elif defined(TFM_PPC32)
- /* PPC32 */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- " mullw 16,%3,%4 \n\t" \
- " mulhwu 17,%3,%4 \n\t" \
- " addc 16,16,%2 \n\t" \
- " addze 17,17 \n\t" \
- " addc %1,16,%5 \n\t" \
- " addze %0,17 \n\t" \
- :"=r"(cy),"=r"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "cc"); ++tmpm;
- #define PROPCARRY \
- __asm__( \
- " addc %1,%3,%2 \n\t" \
- " xor %0,%2,%2 \n\t" \
- " addze %0,%2 \n\t" \
- :"=r"(cy),"=r"(_c[0]):"0"(cy),"1"(_c[0]):"cc");
- #elif defined(TFM_PPC64)
- /* PPC64 */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- " mulld r16,%3,%4 \n\t" \
- " mulhdu r17,%3,%4 \n\t" \
- " addc r16,16,%0 \n\t" \
- " addze r17,r17 \n\t" \
- " ldx r18,0,%1 \n\t" \
- " addc r16,r16,r18 \n\t" \
- " addze %0,r17 \n\t" \
- " sdx r16,0,%1 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm;
- #define PROPCARRY \
- __asm__( \
- " ldx r16,0,%1 \n\t" \
- " addc r16,r16,%0 \n\t" \
- " sdx r16,0,%1 \n\t" \
- " xor %0,%0,%0 \n\t" \
- " addze %0,%0 \n\t" \
- :"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc");
- /******************************************************************/
- #elif defined(TFM_AVR32)
- /* AVR32 */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- " ld.w r2,%1 \n\t" \
- " add r2,%0 \n\t" \
- " eor r3,r3 \n\t" \
- " acr r3 \n\t" \
- " macu.d r2,%3,%4 \n\t" \
- " st.w %1,r2 \n\t" \
- " mov %0,r3 \n\t" \
- :"=r"(cy),"=r"(_c):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c):"r2","r3");
- #define PROPCARRY \
- __asm__( \
- " ld.w r2,%1 \n\t" \
- " add r2,%0 \n\t" \
- " st.w %1,r2 \n\t" \
- " eor %0,%0 \n\t" \
- " acr %0 \n\t" \
- :"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc");
- /******************************************************************/
- #elif defined(TFM_MIPS)
- /* MIPS */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- __asm__( \
- " multu %3,%4 \n\t" \
- " mflo $12 \n\t" \
- " mfhi $13 \n\t" \
- " addu $12,$12,%0 \n\t" \
- " sltu $10,$12,%0 \n\t" \
- " addu $13,$13,$10 \n\t" \
- " lw $10,%1 \n\t" \
- " addu $12,$12,$10 \n\t" \
- " sltu $10,$12,$10 \n\t" \
- " addu %0,$13,$10 \n\t" \
- " sw $12,%1 \n\t" \
- :"+r"(cy),"+m"(_c[0]):""(cy),"r"(mu),"r"(tmpm[0]),""(_c[0]):"$10","$12","$13"); ++tmpm;
- #define PROPCARRY \
- __asm__( \
- " lw $10,%1 \n\t" \
- " addu $10,$10,%0 \n\t" \
- " sw $10,%1 \n\t" \
- " sltu %0,$10,%0 \n\t" \
- :"+r"(cy),"+m"(_c[0]):""(cy),""(_c[0]):"$10");
- /******************************************************************/
- #else
- /* ISO C code */
- #define MONT_START
- #define MONT_FINI
- #define LOOP_END
- #define LOOP_START \
- mu = c[x] * mp
- #define INNERMUL \
- do { fp_word t; \
- t = ((fp_word)_c[0] + (fp_word)cy) + \
- (((fp_word)mu) * ((fp_word)*tmpm++)); \
- _c[0] = (fp_digit)t; \
- cy = (fp_digit)(t >> DIGIT_BIT); \
- } while (0)
- #define PROPCARRY \
- do { fp_digit t = _c[0] += cy; cy = (t < cy); } while (0)
- #endif
- /******************************************************************/
- #define LO 0
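- /* Sketch of how fp_montgomery_reduce() composes the macros above
- * (simplified from the TomsFastMath original; variable setup and the final
- * compare/subtract against the modulus are omitted):
- *
- *     MONT_START;
- *     for (x = 0; x < pa; x++) {
- *         cy = 0;
- *         LOOP_START;                  mu = c[x] * mp
- *         _c   = c + x;
- *         tmpm = m->dp;
- *         for (y = 0; y < pa; y++) {
- *             INNERMUL;                _c[0] += mu * *tmpm++ + cy
- *             ++_c;
- *         }
- *         LOOP_END;                    store the final carry into cy
- *         while (cy) {
- *             PROPCARRY;               ripple cy into the next digit
- *             ++_c;
- *         }
- *     }
- *     MONT_FINI;
- */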
- /* end fp_montgomery_reduce.c asm */
- /* start fp_sqr_comba.c asm */
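- /* Per-column contract (reference semantics are the ISO C branch at the end
- * of this #if chain): (c2,c1,c0) is a three-digit column accumulator.
- * SQRADD adds i*j once, SQRADD2 adds it twice (off-diagonal terms), and
- * SQRADDSC/SQRADDAC accumulate a run of cross products into (sc2,sc1,sc0)
- * before SQRADDDB doubles that sum into the main accumulator. */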
- #if defined(TFM_X86)
- /* x86-32 optimized */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- #define SQRADD(i, j) \
- __asm__( \
- "movl %3,%%eax \n\t" \
- "mull %%eax \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"+m"(c0), "+m"(c1), "+m"(c2) \
- : "m"(i) \
- :"%eax","%edx","cc");
- #define SQRADD2(i, j) \
- __asm__( \
- "movl %3,%%eax \n\t" \
- "mull %4 \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"+m"(c0), "+m"(c1), "+m"(c2) \
- : "m"(i), "m"(j) \
- :"%eax","%edx", "cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- "movl %3,%%eax \n\t" \
- "mull %4 \n\t" \
- "movl %%eax,%0 \n\t" \
- "movl %%edx,%1 \n\t" \
- "xorl %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2) \
- : "g"(i), "g"(j) \
- :"%eax","%edx","cc");
- #define SQRADDAC(i, j) \
- __asm__( \
- "movl %6,%%eax \n\t" \
- "mull %7 \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2) \
- : "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) \
- :"%eax","%edx","cc");
- #define SQRADDDB \
- __asm__( \
- "addl %6,%0 \n\t" \
- "adcl %7,%1 \n\t" \
- "adcl %8,%2 \n\t" \
- "addl %6,%0 \n\t" \
- "adcl %7,%1 \n\t" \
- "adcl %8,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) \
- : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), \
- "r"(sc2) \
- : "cc");
- #elif defined(TFM_X86_64)
- /* x86-64 optimized */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- #define SQRADD(i, j) \
- __asm__( \
- "movq %6,%%rax \n\t" \
- "mulq %%rax \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq %%rdx,%1 \n\t" \
- "adcq $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "x"(i) :"%rax","%rdx","cc");
- #define SQRADD2(i, j) \
- __asm__( \
- "movq %6,%%rax \n\t" \
- "mulq %7 \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq %%rdx,%1 \n\t" \
- "adcq $0,%2 \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq %%rdx,%1 \n\t" \
- "adcq $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- "movq %3,%%rax \n\t" \
- "mulq %4 \n\t" \
- "movq %%rax,%0 \n\t" \
- "movq %%rdx,%1 \n\t" \
- "xorq %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%rax","%rdx","cc");
- #define SQRADDAC(i, j) \
- __asm__( \
- "movq %6,%%rax \n\t" \
- "mulq %7 \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq %%rdx,%1 \n\t" \
- "adcq $0,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
- #define SQRADDDB \
- __asm__( \
- "addq %6,%0 \n\t" \
- "adcq %7,%1 \n\t" \
- "adcq %8,%2 \n\t" \
- "addq %6,%0 \n\t" \
- "adcq %7,%1 \n\t" \
- "adcq %8,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
- #elif defined(TFM_SSE2)
- /* SSE2 Optimized */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI \
- __asm__("emms");
- #define SQRADD(i, j) \
- __asm__( \
- "movd %6,%%mm0 \n\t" \
- "pmuludq %%mm0,%%mm0\n\t" \
- "movd %%mm0,%%eax \n\t" \
- "psrlq $32,%%mm0 \n\t" \
- "addl %%eax,%0 \n\t" \
- "movd %%mm0,%%eax \n\t" \
- "adcl %%eax,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc");
- #define SQRADD2(i, j) \
- __asm__( \
- "movd %6,%%mm0 \n\t" \
- "movd %7,%%mm1 \n\t" \
- "pmuludq %%mm1,%%mm0\n\t" \
- "movd %%mm0,%%eax \n\t" \
- "psrlq $32,%%mm0 \n\t" \
- "movd %%mm0,%%edx \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- "movd %3,%%mm0 \n\t" \
- "movd %4,%%mm1 \n\t" \
- "pmuludq %%mm1,%%mm0\n\t" \
- "movd %%mm0,%0 \n\t" \
- "psrlq $32,%%mm0 \n\t" \
- "movd %%mm0,%1 \n\t" \
- "xorl %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "m"(i), "m"(j));
- /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
- #define SQRADDAC(i, j) \
- __asm__( \
- "movd %6,%%mm0 \n\t" \
- "movd %7,%%mm1 \n\t" \
- "pmuludq %%mm1,%%mm0\n\t" \
- "movd %%mm0,%%eax \n\t" \
- "psrlq $32,%%mm0 \n\t" \
- "movd %%mm0,%%edx \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc");
- #define SQRADDDB \
- __asm__( \
- "addl %6,%0 \n\t" \
- "adcl %7,%1 \n\t" \
- "adcl %8,%2 \n\t" \
- "addl %6,%0 \n\t" \
- "adcl %7,%1 \n\t" \
- "adcl %8,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(sc0), "r"(sc1), "r"(sc2) : "cc");
- #elif defined(TFM_ARM)
- /* ARM code */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- __asm__( \
- " UMULL r0,r1,%6,%6 \n\t" \
- " ADDS %0,%0,r0 \n\t" \
- " ADCS %1,%1,r1 \n\t" \
- " ADC %2,%2,#0 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc");
- /* for squaring some of the terms are doubled... */
- #define SQRADD2(i, j) \
- __asm__( \
- " UMULL r0,r1,%6,%7 \n\t" \
- " ADDS %0,%0,r0 \n\t" \
- " ADCS %1,%1,r1 \n\t" \
- " ADC %2,%2,#0 \n\t" \
- " ADDS %0,%0,r0 \n\t" \
- " ADCS %1,%1,r1 \n\t" \
- " ADC %2,%2,#0 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- " UMULL %0,%1,%3,%4 \n\t" \
- " SUB %2,%2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "r"(i), "r"(j) : "cc");
- /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
- #define SQRADDAC(i, j) \
- __asm__( \
- " UMULL r0,r1,%6,%7 \n\t" \
- " ADDS %0,%0,r0 \n\t" \
- " ADCS %1,%1,r1 \n\t" \
- " ADC %2,%2,#0 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
- #define SQRADDDB \
- __asm__( \
- " ADDS %0,%0,%3 \n\t" \
- " ADCS %1,%1,%4 \n\t" \
- " ADC %2,%2,%5 \n\t" \
- " ADDS %0,%0,%3 \n\t" \
- " ADCS %1,%1,%4 \n\t" \
- " ADC %2,%2,%5 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
- #elif defined(TFM_PPC32)
- /* PPC32 */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- __asm__( \
- " mullw 16,%6,%6 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhwu 16,%6,%6 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
- /* for squaring some of the terms are doubled... */
- #define SQRADD2(i, j) \
- __asm__( \
- " mullw 16,%6,%7 \n\t" \
- " mulhwu 17,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " adde %1,%1,17 \n\t" \
- " addze %2,%2 \n\t" \
- " addc %0,%0,16 \n\t" \
- " adde %1,%1,17 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- " mullw %0,%6,%7 \n\t" \
- " mulhwu %1,%6,%7 \n\t" \
- " xor %2,%2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
- #define SQRADDAC(i, j) \
- __asm__( \
- " mullw 16,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhwu 16,%6,%7 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
- #define SQRADDDB \
- __asm__( \
- " addc %0,%0,%3 \n\t" \
- " adde %1,%1,%4 \n\t" \
- " adde %2,%2,%5 \n\t" \
- " addc %0,%0,%3 \n\t" \
- " adde %1,%1,%4 \n\t" \
- " adde %2,%2,%5 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
- #elif defined(TFM_PPC64)
- /* PPC64 */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- __asm__( \
- " mulld r16,%6,%6 \n\t" \
- " addc %0,%0,r16 \n\t" \
- " mulhdu r16,%6,%6 \n\t" \
- " adde %1,%1,r16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc");
- /* for squaring some of the terms are doubled... */
- #define SQRADD2(i, j) \
- __asm__( \
- " mulld r16,%6,%7 \n\t" \
- " mulhdu r17,%6,%7 \n\t" \
- " addc %0,%0,r16 \n\t" \
- " adde %1,%1,r17 \n\t" \
- " addze %2,%2 \n\t" \
- " addc %0,%0,r16 \n\t" \
- " adde %1,%1,r17 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc");
- #define SQRADDSC(i, j) \
- __asm__( \
- " mulld %0,%6,%7 \n\t" \
- " mulhdu %1,%6,%7 \n\t" \
- " xor %2,%2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
- #define SQRADDAC(i, j) \
- __asm__( \
- " mulld r16,%6,%7 \n\t" \
- " addc %0,%0,r16 \n\t" \
- " mulhdu r16,%6,%7 \n\t" \
- " adde %1,%1,r16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc");
- #define SQRADDDB \
- __asm__( \
- " addc %0,%0,%3 \n\t" \
- " adde %1,%1,%4 \n\t" \
- " adde %2,%2,%5 \n\t" \
- " addc %0,%0,%3 \n\t" \
- " adde %1,%1,%4 \n\t" \
- " adde %2,%2,%5 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
- #elif defined(TFM_AVR32)
- /* AVR32 */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- __asm__( \
- " mulu.d r2,%6,%6 \n\t" \
- " add %0,%0,r2 \n\t" \
- " adc %1,%1,r3 \n\t" \
- " acr %2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r2","r3");
- /* for squaring some of the terms are doubled... */
- #define SQRADD2(i, j) \
- __asm__( \
- " mulu.d r2,%6,%7 \n\t" \
- " add %0,%0,r2 \n\t" \
- " adc %1,%1,r3 \n\t" \
- " acr %2, \n\t" \
- " add %0,%0,r2 \n\t" \
- " adc %1,%1,r3 \n\t" \
- " acr %2, \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2", "r3");
- #define SQRADDSC(i, j) \
- __asm__( \
- " mulu.d r2,%6,%7 \n\t" \
- " mov %0,r2 \n\t" \
- " mov %1,r3 \n\t" \
- " eor %2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "r2", "r3");
- #define SQRADDAC(i, j) \
- __asm__( \
- " mulu.d r2,%6,%7 \n\t" \
- " add %0,%0,r2 \n\t" \
- " adc %1,%1,r3 \n\t" \
- " acr %2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r2", "r3");
- #define SQRADDDB \
- __asm__( \
- " add %0,%0,%3 \n\t" \
- " adc %1,%1,%4 \n\t" \
- " adc %2,%2,%5 \n\t" \
- " add %0,%0,%3 \n\t" \
- " adc %1,%1,%4 \n\t" \
- " adc %2,%2,%5 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
- #elif defined(TFM_MIPS)
- /* MIPS */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- __asm__( \
- " multu %6,%6 \n\t" \
- " mflo $12 \n\t" \
- " mfhi $13 \n\t" \
- " addu %0,%0,$12 \n\t" \
- " sltu $12,%0,$12 \n\t" \
- " addu %1,%1,$13 \n\t" \
- " sltu $13,%1,$13 \n\t" \
- " addu %1,%1,$12 \n\t" \
- " sltu $12,%1,$12 \n\t" \
- " addu %2,%2,$13 \n\t" \
- " addu %2,%2,$12 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"$12","$13");
- /* for squaring some of the terms are doubled... */
- #define SQRADD2(i, j) \
- __asm__( \
- " multu %6,%7 \n\t" \
- " mflo $12 \n\t" \
- " mfhi $13 \n\t" \
- \
- " addu %0,%0,$12 \n\t" \
- " sltu $14,%0,$12 \n\t" \
- " addu %1,%1,$13 \n\t" \
- " sltu $15,%1,$13 \n\t" \
- " addu %1,%1,$14 \n\t" \
- " sltu $14,%1,$14 \n\t" \
- " addu %2,%2,$15 \n\t" \
- " addu %2,%2,$14 \n\t" \
- \
- " addu %0,%0,$12 \n\t" \
- " sltu $14,%0,$12 \n\t" \
- " addu %1,%1,$13 \n\t" \
- " sltu $15,%1,$13 \n\t" \
- " addu %1,%1,$14 \n\t" \
- " sltu $14,%1,$14 \n\t" \
- " addu %2,%2,$15 \n\t" \
- " addu %2,%2,$14 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12", "$13", "$14", "$15");
- #define SQRADDSC(i, j) \
- __asm__( \
- " multu %6,%7 \n\t" \
- " mflo %0 \n\t" \
- " mfhi %1 \n\t" \
- " xor %2,%2,%2 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
- #define SQRADDAC(i, j) \
- __asm__( \
- " multu %6,%7 \n\t" \
- " mflo $12 \n\t" \
- " mfhi $13 \n\t" \
- " addu %0,%0,$12 \n\t" \
- " sltu $12,%0,$12 \n\t" \
- " addu %1,%1,$13 \n\t" \
- " sltu $13,%1,$13 \n\t" \
- " addu %1,%1,$12 \n\t" \
- " sltu $12,%1,$12 \n\t" \
- " addu %2,%2,$13 \n\t" \
- " addu %2,%2,$12 \n\t" \
- :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"$12", "$13", "$14");
- #define SQRADDDB \
- __asm__( \
- " addu %0,%0,%3 \n\t" \
- " sltu $10,%0,%3 \n\t" \
- " addu %1,%1,$10 \n\t" \
- " sltu $10,%1,$10 \n\t" \
- " addu %1,%1,%4 \n\t" \
- " sltu $11,%1,%4 \n\t" \
- " addu %2,%2,$10 \n\t" \
- " addu %2,%2,$11 \n\t" \
- " addu %2,%2,%5 \n\t" \
- \
- " addu %0,%0,%3 \n\t" \
- " sltu $10,%0,%3 \n\t" \
- " addu %1,%1,$10 \n\t" \
- " sltu $10,%1,$10 \n\t" \
- " addu %1,%1,%4 \n\t" \
- " sltu $11,%1,%4 \n\t" \
- " addu %2,%2,$10 \n\t" \
- " addu %2,%2,$11 \n\t" \
- " addu %2,%2,%5 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "$10", "$11");
- #else
- #define TFM_ISO
- /* ISO C portable code */
- #define COMBA_START
- #define CLEAR_CARRY \
- c0 = c1 = c2 = 0;
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define CARRY_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_FINI
- /* multiplies point i and j, updates carry "c1" and digit c2 */
- #define SQRADD(i, j) \
- do { fp_word t; \
- t = c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \
- t = c1 + (t >> DIGIT_BIT); c1 = (fp_digit)t; \
- c2 +=(fp_digit) (t >> DIGIT_BIT); \
- } while (0);
- /* for squaring some of the terms are doubled... note: tt is an fp_word
- * declared at function scope by the callers when TFM_ISO is defined */
- #define SQRADD2(i, j) \
- do { fp_word t; \
- t = ((fp_word)i) * ((fp_word)j); \
- tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
- tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
- c2 +=(fp_digit)(tt >> DIGIT_BIT); \
- tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
- tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
- c2 +=(fp_digit)(tt >> DIGIT_BIT); \
- } while (0);
- #define SQRADDSC(i, j) \
- do { fp_word t; \
- t = ((fp_word)i) * ((fp_word)j); \
- sc0 = (fp_digit)t; sc1 = (t >> DIGIT_BIT); sc2 = 0; \
- } while (0);
- #define SQRADDAC(i, j) \
- do { fp_word t; \
- t = sc0 + ((fp_word)i) * ((fp_word)j); sc0 = (fp_digit)t; \
- t = sc1 + (t >> DIGIT_BIT); sc1 = (fp_digit)t; \
- sc2 += (fp_digit)(t >> DIGIT_BIT); \
- } while (0);
- #define SQRADDDB \
- do { fp_word t; \
- t = ((fp_word)sc0) + ((fp_word)sc0) + c0; c0 = (fp_digit)t; \
- t = ((fp_word)sc1) + ((fp_word)sc1) + c1 + (t >> DIGIT_BIT); \
- c1 = (fp_digit)t; \
- c2 = c2 + (fp_digit)(((fp_word)sc2) + ((fp_word)sc2) + (t >> DIGIT_BIT)); \
- } while (0);
- #endif
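- /* Sketch of how the generated fp_sqr_comba_N() bodies below use these
- * macros (illustrative 2-digit case; the .i files are fully unrolled):
- *
- *     fp_digit c0, c1, c2;
- *     CLEAR_CARRY;
- *     SQRADD(a->dp[0], a->dp[0]);      column 0: a0*a0
- *     COMBA_STORE(b->dp[0]);
- *     CARRY_FORWARD;
- *     SQRADD2(a->dp[0], a->dp[1]);     column 1: 2*a0*a1
- *     COMBA_STORE(b->dp[1]);
- *     CARRY_FORWARD;
- *     SQRADD(a->dp[1], a->dp[1]);      column 2: a1*a1
- *     COMBA_STORE(b->dp[2]);
- *     COMBA_STORE2(b->dp[3]);
- *     COMBA_FINI;
- */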
- #ifdef TFM_SMALL_SET
- #include "fp_sqr_comba_small_set.i"
- #endif
- #if defined(TFM_SQR3) && FP_SIZE >= 6
- #include "fp_sqr_comba_3.i"
- #endif
- #if defined(TFM_SQR4) && FP_SIZE >= 8
- #include "fp_sqr_comba_4.i"
- #endif
- #if defined(TFM_SQR6) && FP_SIZE >= 12
- #include "fp_sqr_comba_6.i"
- #endif
- #if defined(TFM_SQR7) && FP_SIZE >= 14
- #include "fp_sqr_comba_7.i"
- #endif
- #if defined(TFM_SQR8) && FP_SIZE >= 16
- #include "fp_sqr_comba_8.i"
- #endif
- #if defined(TFM_SQR9) && FP_SIZE >= 18
- #include "fp_sqr_comba_9.i"
- #endif
- #if defined(TFM_SQR12) && FP_SIZE >= 24
- #include "fp_sqr_comba_12.i"
- #endif
- #if defined(TFM_SQR17) && FP_SIZE >= 34
- #include "fp_sqr_comba_17.i"
- #endif
- #if defined(TFM_SQR20) && FP_SIZE >= 40
- #include "fp_sqr_comba_20.i"
- #endif
- #if defined(TFM_SQR24) && FP_SIZE >= 48
- #include "fp_sqr_comba_24.i"
- #endif
- #if defined(TFM_SQR28) && FP_SIZE >= 56
- #include "fp_sqr_comba_28.i"
- #endif
- #if defined(TFM_SQR32) && FP_SIZE >= 64
- #include "fp_sqr_comba_32.i"
- #endif
- #if defined(TFM_SQR48) && FP_SIZE >= 96
- #include "fp_sqr_comba_48.i"
- #endif
- #if defined(TFM_SQR64) && FP_SIZE >= 128
- #include "fp_sqr_comba_64.i"
- #endif
- /* end fp_sqr_comba.c asm */
- /* start fp_mul_comba.c asm */
- /* these are the combas. Worship them. */
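- /* MULADD adds the double-width product i*j into the running three-digit
- * column accumulator (c2,c1,c0); reference semantics are the ISO C branch
- * at the end of this #if chain. */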
- #if defined(TFM_X86)
- /* Generic x86 optimized code */
- /* anything you need at the start */
- #define COMBA_START
- /* clear the chaining variables */
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- /* forward the carry to the next digit */
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- /* store the first sum */
- #define COMBA_STORE(x) \
- x = c0;
- /* store the second sum [carry] */
- #define COMBA_STORE2(x) \
- x = c1;
- /* anything you need at the end */
- #define COMBA_FINI
- /* this should multiply i and j */
- #define MULADD(i, j) \
- __asm__( \
- "movl %6,%%eax \n\t" \
- "mull %7 \n\t" \
- "addl %%eax,%0 \n\t" \
- "adcl %%edx,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
- #elif defined(TFM_X86_64)
- /* x86-64 optimized */
- /* anything you need at the start */
- #define COMBA_START
- /* clear the chaining variables */
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- /* forward the carry to the next digit */
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- /* store the first sum */
- #define COMBA_STORE(x) \
- x = c0;
- /* store the second sum [carry] */
- #define COMBA_STORE2(x) \
- x = c1;
- /* anything you need at the end */
- #define COMBA_FINI
- /* this should multiply i and j */
- #define MULADD(i, j) \
- __asm__ ( \
- "movq %6,%%rax \n\t" \
- "mulq %7 \n\t" \
- "addq %%rax,%0 \n\t" \
- "adcq %%rdx,%1 \n\t" \
- "adcq $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
- #if defined(HAVE_INTEL_MULX)
- #define MULADD_BODY(a,b,carry,c) \
- __asm__ volatile( \
- "movq %[a0],%%rdx\n\t" \
- "xorq %%rcx, %%rcx\n\t" \
- "movq 0(%[cp]),%%r8\n\t" \
- "movq 8(%[cp]),%%r9\n\t" \
- "movq 16(%[cp]),%%r10\n\t" \
- "movq 24(%[cp]),%%r11\n\t" \
- \
- "mulx (%[bp]),%%rax, %%rbx\n\t" \
- "adcxq %[ca], %%r8\n\t" \
- "adoxq %%rax, %%r8\n\t" \
- "mulx 8(%[bp]),%%rax, %%rcx\n\t" \
- "adcxq %%rbx, %%r9\n\t" \
- "adoxq %%rax, %%r9\n\t" \
- "mulx 16(%[bp]),%%rax, %%rbx\n\t" \
- "adcxq %%rcx, %%r10\n\t" \
- "adoxq %%rax, %%r10\n\t" \
- "mulx 24(%[bp]),%%rax, %%rcx\n\t" \
- "adcxq %%rbx, %%r11\n\t" \
- "mov $0, %[ca]\n\t" \
- "adoxq %%rax, %%r11\n\t" \
- "adcxq %%rcx, %[ca]\n\t" \
- "mov $0, %%rdx\n\t" \
- "adoxq %%rdx, %[ca]\n\t" \
- \
- "movq %%r8, 0(%[cp])\n\t" \
- "movq %%r9, 8(%[cp])\n\t" \
- "movq %%r10, 16(%[cp])\n\t" \
- "movq %%r11, 24(%[cp])\n\t" \
- : [ca] "+r" (carry) \
- : [a0] "r" (a->dp[ix]), [bp] "r" (&(b->dp[iy])), \
- [cp] "r" (&(c->dp[iz])) \
- : "%r8", "%r9", "%r10", "%r11", \
- "%rdx", "%rax", "%rcx", "%rbx" \
- )
- #define TFM_INTEL_MUL_COMBA(a, b, ca, c) \
- for (iz=0; iz<pa; iz++) c->dp[iz] = 0; \
- for (ix=0; ix<a->used; ix++) { \
- ca = 0; \
- for (iy=0; iy<b->used; iy+=4) { \
- iz = ix + iy; \
- MULADD_BODY(a, b, ca, c); \
- } \
- c->dp[ix + iy] = ca; \
- }
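- /* Note: the iy loop above consumes four digits of b per MULADD_BODY, so
- * this path appears to assume b->used is a multiple of four, with pa
- * covering the full output so c->dp starts zeroed. */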
- #endif
- #elif defined(TFM_SSE2)
- /* use SSE2 optimizations */
- /* anything you need at the start */
- #define COMBA_START
- /* clear the chaining variables */
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- /* forward the carry to the next digit */
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- /* store the first sum */
- #define COMBA_STORE(x) \
- x = c0;
- /* store the second sum [carry] */
- #define COMBA_STORE2(x) \
- x = c1;
- /* anything you need at the end */
- #define COMBA_FINI \
- __asm__("emms");
- /* this should multiply i and j */
- #define MULADD(i, j) \
- __asm__( \
- "movd %6,%%mm0 \n\t" \
- "movd %7,%%mm1 \n\t" \
- "pmuludq %%mm1,%%mm0\n\t" \
- "movd %%mm0,%%eax \n\t" \
- "psrlq $32,%%mm0 \n\t" \
- "addl %%eax,%0 \n\t" \
- "movd %%mm0,%%eax \n\t" \
- "adcl %%eax,%1 \n\t" \
- "adcl $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","cc");
- #elif defined(TFM_ARM)
- /* ARM code */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- #define MULADD(i, j) \
- __asm__( \
- " UMULL r0,r1,%6,%7 \n\t" \
- " ADDS %0,%0,r0 \n\t" \
- " ADCS %1,%1,r1 \n\t" \
- " ADC %2,%2,#0 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
- #elif defined(TFM_PPC32)
- /* For 32-bit PPC */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- /* untested: will mulhwu change the flags? Docs say no */
- #define MULADD(i, j) \
- __asm__( \
- " mullw 16,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhwu 16,%6,%7 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16");
- #elif defined(TFM_PPC64)
- /* For 64-bit PPC */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- /* untested: will mulhdu change the flags? Docs say no */
- #define MULADD(i, j) \
- __asm__( \
- " mulld r16,%6,%7 \n\t" \
- " addc %0,%0,r16 \n\t" \
- " mulhdu r16,%6,%7 \n\t" \
- " adde %1,%1,r16 \n\t" \
- " addze %2,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16");
- #elif defined(TFM_AVR32)
- /* AVR32 */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- #define MULADD(i, j) \
- __asm__( \
- " mulu.d r2,%6,%7 \n\t"\
- " add %0,r2 \n\t"\
- " adc %1,%1,r3 \n\t"\
- " acr %2 \n\t"\
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2","r3");
- #elif defined(TFM_MIPS)
- /* MIPS */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- #define MULADD(i, j) \
- __asm__( \
- " multu %6,%7 \n\t" \
- " mflo $12 \n\t" \
- " mfhi $13 \n\t" \
- " addu %0,%0,$12 \n\t" \
- " sltu $12,%0,$12 \n\t" \
- " addu %1,%1,$13 \n\t" \
- " sltu $13,%1,$13 \n\t" \
- " addu %1,%1,$12 \n\t" \
- " sltu $12,%1,$12 \n\t" \
- " addu %2,%2,$13 \n\t" \
- " addu %2,%2,$12 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12","$13");
- #else
- /* ISO C code */
- #define COMBA_START
- #define COMBA_CLEAR \
- c0 = c1 = c2 = 0;
- #define COMBA_FORWARD \
- do { c0 = c1; c1 = c2; c2 = 0; } while (0);
- #define COMBA_STORE(x) \
- x = c0;
- #define COMBA_STORE2(x) \
- x = c1;
- #define COMBA_FINI
- #define MULADD(i, j) \
- do { fp_word t; \
- t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); \
- c0 = (fp_digit)t; \
- t = (fp_word)c1 + (t >> DIGIT_BIT); \
- c1 = (fp_digit)t; \
- c2 += (fp_digit)(t >> DIGIT_BIT); \
- } while (0);
- #endif
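- /* Sketch of how the generated fp_mul_comba_N() bodies below use these
- * macros (illustrative 2x2-digit case; column k sums every a->dp[i] *
- * b->dp[j] with i + j == k):
- *
- *     fp_digit c0, c1, c2;
- *     COMBA_START;
- *     COMBA_CLEAR;
- *     MULADD(a->dp[0], b->dp[0]);      column 0
- *     COMBA_STORE(c->dp[0]);
- *     COMBA_FORWARD;
- *     MULADD(a->dp[0], b->dp[1]);      column 1
- *     MULADD(a->dp[1], b->dp[0]);
- *     COMBA_STORE(c->dp[1]);
- *     COMBA_FORWARD;
- *     MULADD(a->dp[1], b->dp[1]);      column 2
- *     COMBA_STORE(c->dp[2]);
- *     COMBA_STORE2(c->dp[3]);
- *     COMBA_FINI;
- */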
- #ifdef TFM_SMALL_SET
- #include "fp_mul_comba_small_set.i"
- #endif
- #if defined(TFM_MUL3) && FP_SIZE >= 6
- #include "fp_mul_comba_3.i"
- #endif
- #if defined(TFM_MUL4) && FP_SIZE >= 8
- #include "fp_mul_comba_4.i"
- #endif
- #if defined(TFM_MUL6) && FP_SIZE >= 12
- #include "fp_mul_comba_6.i"
- #endif
- #if defined(TFM_MUL7) && FP_SIZE >= 14
- #include "fp_mul_comba_7.i"
- #endif
- #if defined(TFM_MUL8) && FP_SIZE >= 16
- #include "fp_mul_comba_8.i"
- #endif
- #if defined(TFM_MUL9) && FP_SIZE >= 18
- #include "fp_mul_comba_9.i"
- #endif
- #if defined(TFM_MUL12) && FP_SIZE >= 24
- #include "fp_mul_comba_12.i"
- #endif
- #if defined(TFM_MUL17) && FP_SIZE >= 34
- #include "fp_mul_comba_17.i"
- #endif
- #if defined(TFM_MUL20) && FP_SIZE >= 40
- #include "fp_mul_comba_20.i"
- #endif
- #if defined(TFM_MUL24) && FP_SIZE >= 48
- #include "fp_mul_comba_24.i"
- #endif
- #if defined(TFM_MUL28) && FP_SIZE >= 56
- #include "fp_mul_comba_28.i"
- #endif
- #if defined(TFM_MUL32) && FP_SIZE >= 64
- #include "fp_mul_comba_32.i"
- #endif
- #if defined(TFM_MUL48) && FP_SIZE >= 96
- #include "fp_mul_comba_48.i"
- #endif
- #if defined(TFM_MUL64) && FP_SIZE >= 128
- #include "fp_mul_comba_64.i"
- #endif
- /* end fp_mul_comba.c asm */
|