- #!/usr/bin/env perl
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- # October 2005.
- #
- # Montgomery multiplication routine for x86_64. While it gives a
- # modest 9% improvement for rsa4096 sign on Opteron, rsa512 sign
- # runs more than twice as fast. The most common case, rsa1024 sign,
- # is improved by a respectable 50%. It remains to be seen whether
- # loop unrolling and a dedicated squaring routine can provide
- # further improvement...
- # July 2011.
- #
- # Add a dedicated squaring procedure. The performance improvement
- # varies from platform to platform, but on average it's ~5%/15%/25%/33%
- # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
- # August 2011.
- #
- # Unroll and modulo-schedule the inner loops in such a manner that
- # they are "fallen through" for input lengths of 8, which is critical
- # for 1024-bit RSA *sign*. The average performance improvement over
- # the *initial* 2005 version of this module is ~0%/30%/40%/45%
- # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
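- #
- # For reference, the loops below implement the classic word-by-word
- # Montgomery multiplication; roughly, in C-like pseudocode (a sketch
- # of the computation, not part of the module):
- #
- #	for (i=0; i<num; i++) {			# .Louter
- #		m1 = (tp[0] + ap[0]*bp[i])*n0 mod 2^64;
- #		tp = (tp + ap[]*bp[i] + np[]*m1)/2^64;	# exact: low word cancels
- #	}
- #	return tp>=np ? tp-np : tp;		# .Lsub/.Lcopy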
- $flavour = shift;
- $output = shift;
- if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
- $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
- die "can't locate x86_64-xlate.pl";
- open STDOUT,"| $^X $xlate $flavour $output";
- # int bn_mul_mont(
- $rp="%rdi"; # BN_ULONG *rp,
- $ap="%rsi"; # const BN_ULONG *ap,
- $bp="%rdx"; # const BN_ULONG *bp,
- $np="%rcx"; # const BN_ULONG *np,
- $n0="%r8"; # const BN_ULONG *n0,
- $num="%r9"; # int num);
- $lo0="%r10";
- $hi0="%r11";
- $hi1="%r13";
- $i="%r14";
- $j="%r15";
- $m0="%rbx";
- $m1="%rbp";
- $code=<<___;
- .text
- .globl bn_mul_mont
- .type bn_mul_mont,\@function,6
- .align 16
- bn_mul_mont:
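- # dispatch: lengths that are a multiple of 4 and at least 8 words
- # take the unrolled 4x path, squaring (bp==ap) the dedicated
- # routine, everything else the generic loop at .Lmul_enter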
- test \$3,${num}d
- jnz .Lmul_enter
- cmp \$8,${num}d
- jb .Lmul_enter
- cmp $ap,$bp
- jne .Lmul4x_enter
- jmp .Lsqr4x_enter
- .align 16
- .Lmul_enter:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- mov ${num}d,${num}d
- lea 2($num),%r10
- mov %rsp,%r11
- neg %r10
- lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
- and \$-1024,%rsp # minimize TLB usage
- mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
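- # (rounding %rsp down to a 1KB boundary above keeps tp[] within as
- # few pages as possible, which is what "minimize TLB usage" means)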
- .Lmul_body:
- mov $bp,%r12 # reassign $bp
- ___
- $bp="%r12";
- $code.=<<___;
- mov ($n0),$n0 # pull n0[0] value
- mov ($bp),$m0 # m0=bp[0]
- mov ($ap),%rax
- xor $i,$i # i=0
- xor $j,$j # j=0
- mov $n0,$m1
- mulq $m0 # ap[0]*bp[0]
- mov %rax,$lo0
- mov ($np),%rax
- imulq $lo0,$m1 # "tp[0]"*n0
- mov %rdx,$hi0
- mulq $m1 # np[0]*m1
- add %rax,$lo0 # discarded
- mov 8($ap),%rax
- adc \$0,%rdx
- mov %rdx,$hi1
- lea 1($j),$j # j++
- jmp .L1st_enter
- .align 16
- .L1st:
- add %rax,$hi1
- mov ($ap,$j,8),%rax
- adc \$0,%rdx
- add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
- mov $lo0,$hi0
- adc \$0,%rdx
- mov $hi1,-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$hi1
- .L1st_enter:
- mulq $m0 # ap[j]*bp[0]
- add %rax,$hi0
- mov ($np,$j,8),%rax
- adc \$0,%rdx
- lea 1($j),$j # j++
- mov %rdx,$lo0
- mulq $m1 # np[j]*m1
- cmp $num,$j
- jne .L1st
- add %rax,$hi1
- mov ($ap),%rax # ap[0]
- adc \$0,%rdx
- add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $hi1,-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$hi1
- mov $lo0,$hi0
- xor %rdx,%rdx
- add $hi0,$hi1
- adc \$0,%rdx
- mov $hi1,-8(%rsp,$num,8)
- mov %rdx,(%rsp,$num,8) # store upmost overflow bit
- lea 1($i),$i # i++
- jmp .Louter
- .align 16
- .Louter:
- mov ($bp,$i,8),$m0 # m0=bp[i]
- xor $j,$j # j=0
- mov $n0,$m1
- mov (%rsp),$lo0
- mulq $m0 # ap[0]*bp[i]
- add %rax,$lo0 # ap[0]*bp[i]+tp[0]
- mov ($np),%rax
- adc \$0,%rdx
- imulq $lo0,$m1 # tp[0]*n0
- mov %rdx,$hi0
- mulq $m1 # np[0]*m1
- add %rax,$lo0 # discarded
- mov 8($ap),%rax
- adc \$0,%rdx
- mov 8(%rsp),$lo0 # tp[1]
- mov %rdx,$hi1
- lea 1($j),$j # j++
- jmp .Linner_enter
- .align 16
- .Linner:
- add %rax,$hi1
- mov ($ap,$j,8),%rax
- adc \$0,%rdx
- add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
- mov (%rsp,$j,8),$lo0
- adc \$0,%rdx
- mov $hi1,-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$hi1
- .Linner_enter:
- mulq $m0 # ap[j]*bp[i]
- add %rax,$hi0
- mov ($np,$j,8),%rax
- adc \$0,%rdx
- add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
- mov %rdx,$hi0
- adc \$0,$hi0
- lea 1($j),$j # j++
- mulq $m1 # np[j]*m1
- cmp $num,$j
- jne .Linner
- add %rax,$hi1
- mov ($ap),%rax # ap[0]
- adc \$0,%rdx
- add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
- mov (%rsp,$j,8),$lo0
- adc \$0,%rdx
- mov $hi1,-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$hi1
- xor %rdx,%rdx
- add $hi0,$hi1
- adc \$0,%rdx
- add $lo0,$hi1 # pull upmost overflow bit
- adc \$0,%rdx
- mov $hi1,-8(%rsp,$num,8)
- mov %rdx,(%rsp,$num,8) # store upmost overflow bit
- lea 1($i),$i # i++
- cmp $num,$i
- jl .Louter
- xor $i,$i # i=0 and clear CF!
- mov (%rsp),%rax # tp[0]
- lea (%rsp),$ap # borrow ap for tp
- mov $num,$j # j=num
- jmp .Lsub
- .align 16
- .Lsub: sbb ($np,$i,8),%rax
- mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
- mov 8($ap,$i,8),%rax # tp[i+1]
- lea 1($i),$i # i++
- dec $j # doesn't affect CF!
- jnz .Lsub
- sbb \$0,%rax # handle upmost overflow bit
- xor $i,$i
- and %rax,$ap
- not %rax
- mov $rp,$np
- and %rax,$np
- mov $num,$j # j=num
- or $np,$ap # ap=borrow?tp:rp
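- # branch-free select: %rax is all-ones if .Lsub borrowed (tp<np,
- # keep tp) and zero otherwise (take the difference already written
- # to rp), so the and/not/and/or sequence above picks the source
- # vector for .Lcopy without a data-dependent branch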
- .align 16
- .Lcopy: # copy or in-place refresh
- mov ($ap,$i,8),%rax
- mov $i,(%rsp,$i,8) # zap temporary vector
- mov %rax,($rp,$i,8) # rp[i]=tp[i]
- lea 1($i),$i
- sub \$1,$j
- jnz .Lcopy
- mov 8(%rsp,$num,8),%rsi # restore %rsp
- mov \$1,%rax
- mov (%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
- .Lmul_epilogue:
- ret
- .size bn_mul_mont,.-bn_mul_mont
- ___
- {{{
- my @A=("%r10","%r11");
- my @N=("%r13","%rdi");
- $code.=<<___;
- .type bn_mul4x_mont,\@function,6
- .align 16
- bn_mul4x_mont:
- .Lmul4x_enter:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- mov ${num}d,${num}d
- lea 4($num),%r10
- mov %rsp,%r11
- neg %r10
- lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
- and \$-1024,%rsp # minimize TLB usage
- mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
- .Lmul4x_body:
- mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
- mov %rdx,%r12 # reassign $bp
- ___
- $bp="%r12";
- $code.=<<___;
- mov ($n0),$n0 # pull n0[0] value
- mov ($bp),$m0 # m0=bp[0]
- mov ($ap),%rax
- xor $i,$i # i=0
- xor $j,$j # j=0
- mov $n0,$m1
- mulq $m0 # ap[0]*bp[0]
- mov %rax,$A[0]
- mov ($np),%rax
- imulq $A[0],$m1 # "tp[0]"*n0
- mov %rdx,$A[1]
- mulq $m1 # np[0]*m1
- add %rax,$A[0] # discarded
- mov 8($ap),%rax
- adc \$0,%rdx
- mov %rdx,$N[1]
- mulq $m0
- add %rax,$A[1]
- mov 8($np),%rax
- adc \$0,%rdx
- mov %rdx,$A[0]
- mulq $m1
- add %rax,$N[1]
- mov 16($ap),%rax
- adc \$0,%rdx
- add $A[1],$N[1]
- lea 4($j),$j # j++
- adc \$0,%rdx
- mov $N[1],(%rsp)
- mov %rdx,$N[0]
- jmp .L1st4x
- .align 16
- .L1st4x:
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[0]
- mov -16($np,$j,8),%rax
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov -8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[0],-24(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[1]
- mov -8($np,$j,8),%rax
- adc \$0,%rdx
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov ($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[1],-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[0]
- mov ($np,$j,8),%rax
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov 8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[0],-8(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[1]
- mov 8($np,$j,8),%rax
- adc \$0,%rdx
- lea 4($j),$j # j++
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov -16($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[1],-32(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- cmp $num,$j
- jl .L1st4x
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[0]
- mov -16($np,$j,8),%rax
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov -8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[0],-24(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[0]
- add %rax,$A[1]
- mov -8($np,$j,8),%rax
- adc \$0,%rdx
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov ($ap),%rax # ap[0]
- adc \$0,%rdx
- add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
- adc \$0,%rdx
- mov $N[1],-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- xor $N[1],$N[1]
- add $A[0],$N[0]
- adc \$0,$N[1]
- mov $N[0],-8(%rsp,$j,8)
- mov $N[1],(%rsp,$j,8) # store upmost overflow bit
- lea 1($i),$i # i++
- .align 4
- .Louter4x:
- mov ($bp,$i,8),$m0 # m0=bp[i]
- xor $j,$j # j=0
- mov (%rsp),$A[0]
- mov $n0,$m1
- mulq $m0 # ap[0]*bp[i]
- add %rax,$A[0] # ap[0]*bp[i]+tp[0]
- mov ($np),%rax
- adc \$0,%rdx
- imulq $A[0],$m1 # tp[0]*n0
- mov %rdx,$A[1]
- mulq $m1 # np[0]*m1
- add %rax,$A[0] # "$N[0]", discarded
- mov 8($ap),%rax
- adc \$0,%rdx
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[1]
- mov 8($np),%rax
- adc \$0,%rdx
- add 8(%rsp),$A[1] # +tp[1]
- adc \$0,%rdx
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov 16($ap),%rax
- adc \$0,%rdx
- add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
- lea 4($j),$j # j++
- adc \$0,%rdx
- mov $N[1],(%rsp) # tp[j-1]
- mov %rdx,$N[0]
- jmp .Linner4x
- .align 16
- .Linner4x:
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[0]
- mov -16($np,$j,8),%rax
- adc \$0,%rdx
- add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov -8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0]
- adc \$0,%rdx
- mov $N[0],-24(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[1]
- mov -8($np,$j,8),%rax
- adc \$0,%rdx
- add -8(%rsp,$j,8),$A[1]
- adc \$0,%rdx
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov ($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[1],$N[1]
- adc \$0,%rdx
- mov $N[1],-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[0]
- mov ($np,$j,8),%rax
- adc \$0,%rdx
- add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov 8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0]
- adc \$0,%rdx
- mov $N[0],-8(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[1]
- mov 8($np,$j,8),%rax
- adc \$0,%rdx
- add 8(%rsp,$j,8),$A[1]
- adc \$0,%rdx
- lea 4($j),$j # j++
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov -16($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[1],$N[1]
- adc \$0,%rdx
- mov $N[1],-32(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- cmp $num,$j
- jl .Linner4x
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[0]
- mov -16($np,$j,8),%rax
- adc \$0,%rdx
- add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
- adc \$0,%rdx
- mov %rdx,$A[1]
- mulq $m1 # np[j]*m1
- add %rax,$N[0]
- mov -8($ap,$j,8),%rax
- adc \$0,%rdx
- add $A[0],$N[0]
- adc \$0,%rdx
- mov $N[0],-24(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[1]
- mulq $m0 # ap[j]*bp[i]
- add %rax,$A[1]
- mov -8($np,$j,8),%rax
- adc \$0,%rdx
- add -8(%rsp,$j,8),$A[1]
- adc \$0,%rdx
- lea 1($i),$i # i++
- mov %rdx,$A[0]
- mulq $m1 # np[j]*m1
- add %rax,$N[1]
- mov ($ap),%rax # ap[0]
- adc \$0,%rdx
- add $A[1],$N[1]
- adc \$0,%rdx
- mov $N[1],-16(%rsp,$j,8) # tp[j-1]
- mov %rdx,$N[0]
- xor $N[1],$N[1]
- add $A[0],$N[0]
- adc \$0,$N[1]
- add (%rsp,$num,8),$N[0] # pull upmost overflow bit
- adc \$0,$N[1]
- mov $N[0],-8(%rsp,$j,8)
- mov $N[1],(%rsp,$j,8) # store upmost overflow bit
- cmp $num,$i
- jl .Louter4x
- ___
- {
- my @ri=("%rax","%rdx",$m0,$m1);
- $code.=<<___;
- mov 16(%rsp,$num,8),$rp # restore $rp
- mov 0(%rsp),@ri[0] # tp[0]
- pxor %xmm0,%xmm0
- mov 8(%rsp),@ri[1] # tp[1]
- shr \$2,$num # num/=4
- lea (%rsp),$ap # borrow ap for tp
- xor $i,$i # i=0 and clear CF!
- sub 0($np),@ri[0]
- mov 16($ap),@ri[2] # tp[2]
- mov 24($ap),@ri[3] # tp[3]
- sbb 8($np),@ri[1]
- lea -1($num),$j # j=num/4-1
- jmp .Lsub4x
- .align 16
- .Lsub4x:
- mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
- mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
- sbb 16($np,$i,8),@ri[2]
- mov 32($ap,$i,8),@ri[0] # tp[i+1]
- mov 40($ap,$i,8),@ri[1]
- sbb 24($np,$i,8),@ri[3]
- mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
- mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
- sbb 32($np,$i,8),@ri[0]
- mov 48($ap,$i,8),@ri[2]
- mov 56($ap,$i,8),@ri[3]
- sbb 40($np,$i,8),@ri[1]
- lea 4($i),$i # i++
- dec $j # doesn't affect CF!
- jnz .Lsub4x
- mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
- mov 32($ap,$i,8),@ri[0] # load overflow bit
- sbb 16($np,$i,8),@ri[2]
- mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
- sbb 24($np,$i,8),@ri[3]
- mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
- sbb \$0,@ri[0] # handle upmost overflow bit
- mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
- xor $i,$i # i=0
- and @ri[0],$ap
- not @ri[0]
- mov $rp,$np
- and @ri[0],$np
- lea -1($num),$j
- or $np,$ap # ap=borrow?tp:rp
- movdqu ($ap),%xmm1
- movdqa %xmm0,(%rsp)
- movdqu %xmm1,($rp)
- jmp .Lcopy4x
- .align 16
- .Lcopy4x: # copy or in-place refresh
- movdqu 16($ap,$i),%xmm2
- movdqu 32($ap,$i),%xmm1
- movdqa %xmm0,16(%rsp,$i)
- movdqu %xmm2,16($rp,$i)
- movdqa %xmm0,32(%rsp,$i)
- movdqu %xmm1,32($rp,$i)
- lea 32($i),$i
- dec $j
- jnz .Lcopy4x
- shl \$2,$num
- movdqu 16($ap,$i),%xmm2
- movdqa %xmm0,16(%rsp,$i)
- movdqu %xmm2,16($rp,$i)
- ___
- }
- $code.=<<___;
- mov 8(%rsp,$num,8),%rsi # restore %rsp
- mov \$1,%rax
- mov (%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
- .Lmul4x_epilogue:
- ret
- .size bn_mul4x_mont,.-bn_mul4x_mont
- ___
- }}}
- {{{
- ######################################################################
- # void bn_sqr4x_mont(
- my $rptr="%rdi"; # const BN_ULONG *rptr,
- my $aptr="%rsi"; # const BN_ULONG *aptr,
- my $bptr="%rdx"; # not used
- my $nptr="%rcx"; # const BN_ULONG *nptr,
- my $n0 ="%r8"; # const BN_ULONG *n0);
- my $num ="%r9"; # int num, has to be divisible by 4 and
- # not less than 8
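- # (bn_mul_mont dispatches here only when both conditions hold,
- # see the test \$3 / cmp \$8 sequence at its entry point)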
- my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
- my @A0=("%r10","%r11");
- my @A1=("%r12","%r13");
- my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
- $code.=<<___;
- .type bn_sqr4x_mont,\@function,6
- .align 16
- bn_sqr4x_mont:
- .Lsqr4x_enter:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- shl \$3,${num}d # convert $num to bytes
- xor %r10,%r10
- mov %rsp,%r11 # put aside %rsp
- sub $num,%r10 # -$num
- mov ($n0),$n0 # *n0
- lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
- and \$-1024,%rsp # minimize TLB usage
- ##############################################################
- # Stack layout
- #
- # +0 saved $num, used in reduction section
- # +8 &t[2*$num], used in reduction section
- # +32 saved $rptr
- # +40 saved $nptr
- # +48 saved *n0
- # +56 saved %rsp
- # +64 t[2*$num]
- #
- mov $rptr,32(%rsp) # save $rptr
- mov $nptr,40(%rsp)
- mov $n0, 48(%rsp)
- mov %r11, 56(%rsp) # save original %rsp
- .Lsqr4x_body:
- ##############################################################
- # Squaring part:
- #
- # a) multiply-n-add everything but a[i]*a[i];
- # b) shift result of a) by 1 to the left and accumulate
- # a[i]*a[i] products;
- #
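- # i.e. a^2 = 2*(sum over i<j of a[i]*a[j]*2^(64*(i+j)))
- #          +    sum over i   of a[i]^2  *2^(128*i),
- # so every cross product is computed once and doubled by the
- # left-shift pass (.Lsqr4x_shift_n_add), which also folds in
- # the diagonal a[i]*a[i] terms;
- #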
- lea 32(%r10),$i # $i=-($num-32)
- lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
- mov $num,$j # $j=$num
- # comments apply to $num==8 case
- mov -32($aptr,$i),$a0 # a[0]
- lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
- mov -24($aptr,$i),%rax # a[1]
- lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
- mov -16($aptr,$i),$ai # a[2]
- mov %rax,$a1
- mul $a0 # a[1]*a[0]
- mov %rax,$A0[0] # a[1]*a[0]
- mov $ai,%rax # a[2]
- mov %rdx,$A0[1]
- mov $A0[0],-24($tptr,$i) # t[1]
- xor $A0[0],$A0[0]
- mul $a0 # a[2]*a[0]
- add %rax,$A0[1]
- mov $ai,%rax
- adc %rdx,$A0[0]
- mov $A0[1],-16($tptr,$i) # t[2]
- lea -16($i),$j # j=-16
- mov 8($aptr,$j),$ai # a[3]
- mul $a1 # a[2]*a[1]
- mov %rax,$A1[0] # a[2]*a[1]+t[3]
- mov $ai,%rax
- mov %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- lea 16($j),$j
- adc \$0,$A0[1]
- mul $a0 # a[3]*a[0]
- add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],-8($tptr,$j) # t[3]
- jmp .Lsqr4x_1st
- .align 16
- .Lsqr4x_1st:
- mov ($aptr,$j),$ai # a[4]
- xor $A1[0],$A1[0]
- mul $a1 # a[3]*a[1]
- add %rax,$A1[1] # a[3]*a[1]+t[4]
- mov $ai,%rax
- adc %rdx,$A1[0]
- xor $A0[0],$A0[0]
- add $A1[1],$A0[1]
- adc \$0,$A0[0]
- mul $a0 # a[4]*a[0]
- add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
- mov $ai,%rax # a[4]
- adc %rdx,$A0[0]
- mov $A0[1],($tptr,$j) # t[4]
- mov 8($aptr,$j),$ai # a[5]
- xor $A1[1],$A1[1]
- mul $a1 # a[4]*a[1]
- add %rax,$A1[0] # a[4]*a[1]+t[5]
- mov $ai,%rax
- adc %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- lea 16($j),$j
- adc \$0,$A0[1]
- mul $a0 # a[5]*a[0]
- add %rax,$A0[0] # a[5]*a[0]+a[4]*a[1]+t[5]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],-8($tptr,$j) # t[5]
- mov ($aptr,$j),$ai # a[6]
- xor $A1[0],$A1[0]
- mul $a1 # a[5]*a[1]
- add %rax,$A1[1] # a[5]*a[1]+t[6]
- mov $ai,%rax
- adc %rdx,$A1[0]
- xor $A0[0],$A0[0]
- add $A1[1],$A0[1]
- adc \$0,$A0[0]
- mul $a0 # a[6]*a[0]
- add %rax,$A0[1] # a[6]*a[0]+a[5]*a[1]+t[6]
- mov $ai,%rax # a[6]
- adc %rdx,$A0[0]
- mov $A0[1],($tptr,$j) # t[6]
- mov 8($aptr,$j),$ai # a[7]
- xor $A1[1],$A1[1]
- mul $a1 # a[6]*a[1]
- add %rax,$A1[0] # a[6]*a[1]+t[7]
- mov $ai,%rax
- adc %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- lea 16($j),$j
- adc \$0,$A0[1]
- mul $a0 # a[7]*a[0]
- add %rax,$A0[0] # a[7]*a[0]+a[6]*a[1]+t[7]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],-8($tptr,$j) # t[7]
- cmp \$0,$j
- jne .Lsqr4x_1st
- xor $A1[0],$A1[0]
- add $A0[1],$A1[1]
- adc \$0,$A1[0]
- mul $a1 # a[7]*a[1]
- add %rax,$A1[1]
- adc %rdx,$A1[0]
- mov $A1[1],($tptr) # t[8]
- lea 16($i),$i
- mov $A1[0],8($tptr) # t[9]
- jmp .Lsqr4x_outer
- .align 16
- .Lsqr4x_outer: # comments apply to $num==6 case
- mov -32($aptr,$i),$a0 # a[0]
- lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
- mov -24($aptr,$i),%rax # a[1]
- lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
- mov -16($aptr,$i),$ai # a[2]
- mov %rax,$a1
- mov -24($tptr,$i),$A0[0] # t[1]
- xor $A0[1],$A0[1]
- mul $a0 # a[1]*a[0]
- add %rax,$A0[0] # a[1]*a[0]+t[1]
- mov $ai,%rax # a[2]
- adc %rdx,$A0[1]
- mov $A0[0],-24($tptr,$i) # t[1]
- xor $A0[0],$A0[0]
- add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
- adc \$0,$A0[0]
- mul $a0 # a[2]*a[0]
- add %rax,$A0[1]
- mov $ai,%rax
- adc %rdx,$A0[0]
- mov $A0[1],-16($tptr,$i) # t[2]
- lea -16($i),$j # j=-16
- xor $A1[0],$A1[0]
- mov 8($aptr,$j),$ai # a[3]
- xor $A1[1],$A1[1]
- add 8($tptr,$j),$A1[0]
- adc \$0,$A1[1]
- mul $a1 # a[2]*a[1]
- add %rax,$A1[0] # a[2]*a[1]+t[3]
- mov $ai,%rax
- adc %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- adc \$0,$A0[1]
- mul $a0 # a[3]*a[0]
- add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],8($tptr,$j) # t[3]
- lea 16($j),$j
- jmp .Lsqr4x_inner
- .align 16
- .Lsqr4x_inner:
- mov ($aptr,$j),$ai # a[4]
- xor $A1[0],$A1[0]
- add ($tptr,$j),$A1[1]
- adc \$0,$A1[0]
- mul $a1 # a[3]*a[1]
- add %rax,$A1[1] # a[3]*a[1]+t[4]
- mov $ai,%rax
- adc %rdx,$A1[0]
- xor $A0[0],$A0[0]
- add $A1[1],$A0[1]
- adc \$0,$A0[0]
- mul $a0 # a[4]*a[0]
- add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
- mov $ai,%rax # a[4]
- adc %rdx,$A0[0]
- mov $A0[1],($tptr,$j) # t[4]
- mov 8($aptr,$j),$ai # a[5]
- xor $A1[1],$A1[1]
- add 8($tptr,$j),$A1[0]
- adc \$0,$A1[1]
- mul $a1 # a[4]*a[1]
- add %rax,$A1[0] # a[4]*a[1]+t[5]
- mov $ai,%rax
- adc %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- lea 16($j),$j # j++
- adc \$0,$A0[1]
- mul $a0 # a[5]*a[0]
- add %rax,$A0[0] # a[5]*a[0]+a[4]*a[1]+t[5]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],-8($tptr,$j) # t[5]
- cmp \$0,$j
- jne .Lsqr4x_inner
- xor $A1[0],$A1[0]
- add $A0[1],$A1[1]
- adc \$0,$A1[0]
- mul $a1 # a[5]*a[1]
- add %rax,$A1[1]
- adc %rdx,$A1[0]
- mov $A1[1],($tptr) # t[6]
- mov $A1[0],8($tptr) # t[7]
- add \$16,$i
- jnz .Lsqr4x_outer
- # comments apply to $num==4 case
- mov -32($aptr),$a0 # a[0]
- lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
- mov -24($aptr),%rax # a[1]
- lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
- mov -16($aptr),$ai # a[2]
- mov %rax,$a1
- mov -24($tptr),$A0[0] # t[1]
- xor $A0[1],$A0[1]
- mul $a0 # a[1]*a[0]
- add %rax,$A0[0] # a[1]*a[0]+t[1]
- mov $ai,%rax # a[2]
- adc %rdx,$A0[1]
- mov $A0[0],-24($tptr) # t[1]
- xor $A0[0],$A0[0]
- add -16($tptr),$A0[1] # a[2]*a[0]+t[2]
- adc \$0,$A0[0]
- mul $a0 # a[2]*a[0]
- add %rax,$A0[1]
- mov $ai,%rax
- adc %rdx,$A0[0]
- mov $A0[1],-16($tptr) # t[2]
- xor $A1[0],$A1[0]
- mov -8($aptr),$ai # a[3]
- xor $A1[1],$A1[1]
- add -8($tptr),$A1[0]
- adc \$0,$A1[1]
- mul $a1 # a[2]*a[1]
- add %rax,$A1[0] # a[2]*a[1]+t[3]
- mov $ai,%rax
- adc %rdx,$A1[1]
- xor $A0[1],$A0[1]
- add $A1[0],$A0[0]
- adc \$0,$A0[1]
- mul $a0 # a[3]*a[0]
- add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
- mov $ai,%rax
- adc %rdx,$A0[1]
- mov $A0[0],-8($tptr) # t[3]
- xor $A1[0],$A1[0]
- add $A0[1],$A1[1]
- adc \$0,$A1[0]
- mul $a1 # a[3]*a[1]
- add %rax,$A1[1]
- mov -16($aptr),%rax # a[2]
- adc %rdx,$A1[0]
- mov $A1[1],($tptr) # t[4]
- mov $A1[0],8($tptr) # t[5]
- mul $ai # a[2]*a[3]
- ___
- {
- my ($shift,$carry)=($a0,$a1);
- my @S=(@A1,$ai,$n0);
- $code.=<<___;
- add \$16,$i
- xor $shift,$shift
- sub $num,$i # $i=16-$num
- xor $carry,$carry
- add $A1[0],%rax # t[5]
- adc \$0,%rdx
- mov %rax,8($tptr) # t[5]
- mov %rdx,16($tptr) # t[6]
- mov $carry,24($tptr) # t[7]
- mov -16($aptr,$i),%rax # a[0]
- lea 64(%rsp,$num,2),$tptr
- xor $A0[0],$A0[0] # t[0]
- mov -24($tptr,$i,2),$A0[1] # t[1]
- lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[1] # | t[2*i]>>63
- mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[0]
- mov -8($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[0],-32($tptr,$i,2)
- adc %rdx,$S[1]
- lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
- mov $S[1],-24($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[3] # | t[2*i]>>63
- mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[2]
- mov 0($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[2],-16($tptr,$i,2)
- adc %rdx,$S[3]
- lea 16($i),$i
- mov $S[3],-40($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- jmp .Lsqr4x_shift_n_add
- .align 16
- .Lsqr4x_shift_n_add:
- lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[1] # | t[2*i]>>63
- mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[0]
- mov -8($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[0],-32($tptr,$i,2)
- adc %rdx,$S[1]
- lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
- mov $S[1],-24($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[3] # | t[2*i]>>63
- mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[2]
- mov 0($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[2],-16($tptr,$i,2)
- adc %rdx,$S[3]
- lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
- mov $S[3],-8($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[1] # | t[2*i]>>63
- mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[0]
- mov 8($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[0],0($tptr,$i,2)
- adc %rdx,$S[1]
- lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
- mov $S[1],8($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[3] # | t[2*i]>>63
- mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[2]
- mov 16($aptr,$i),%rax # a[i+1] # prefetch
- mov $S[2],16($tptr,$i,2)
- adc %rdx,$S[3]
- mov $S[3],24($tptr,$i,2)
- sbb $carry,$carry # mov cf,$carry
- add \$32,$i
- jnz .Lsqr4x_shift_n_add
- lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[1] # | t[2*i]>>63
- mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
- mov $A0[1],$shift # shift=t[2*i+1]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
- adc %rax,$S[0]
- mov -8($aptr),%rax # a[i+1] # prefetch
- mov $S[0],-32($tptr)
- adc %rdx,$S[1]
- lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
- mov $S[1],-24($tptr)
- sbb $carry,$carry # mov cf,$carry
- shr \$63,$A0[0]
- lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
- shr \$63,$A0[1]
- or $A0[0],$S[3] # | t[2*i]>>63
- mul %rax # a[i]*a[i]
- neg $carry # mov $carry,cf
- adc %rax,$S[2]
- adc %rdx,$S[3]
- mov $S[2],-16($tptr)
- mov $S[3],-8($tptr)
- ___
- }
- ##############################################################
- # Montgomery reduction part, "word-by-word" algorithm.
- #
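- # Roughly: each .Lsqr4x_mont_outer pass derives two multipliers
- # m = t[0]*n0 mod 2^64 (m0/m1 below, modulo-scheduled as marked),
- # adds m*n[] so that the bottom word of t[] cancels, and advances
- # the window by 16 bytes, i.e. an implicit division by 2^128. On
- # exit the upper half of t[2*num] holds t/2^(64*num), reduced to
- # mod n by the post-condition below.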
- {
- my ($topbit,$nptr)=("%rbp",$aptr);
- my ($m0,$m1)=($a0,$a1);
- my @Ni=("%rbx","%r9");
- $code.=<<___;
- mov 40(%rsp),$nptr # restore $nptr
- mov 48(%rsp),$n0 # restore *n0
- xor $j,$j
- mov $num,0(%rsp) # save $num
- sub $num,$j # $j=-$num
- mov 64(%rsp),$A0[0] # t[0] # modsched #
- mov $n0,$m0 # # modsched #
- lea 64(%rsp,$num,2),%rax # end of t[] buffer
- lea 64(%rsp,$num),$tptr # end of t[] window
- mov %rax,8(%rsp) # save end of t[] buffer
- lea ($nptr,$num),$nptr # end of n[] buffer
- xor $topbit,$topbit # $topbit=0
- mov 0($nptr,$j),%rax # n[0] # modsched #
- mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
- imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
- mov %rax,$Ni[0] # # modsched #
- jmp .Lsqr4x_mont_outer
- .align 16
- .Lsqr4x_mont_outer:
- xor $A0[1],$A0[1]
- mul $m0 # n[0]*m0
- add %rax,$A0[0] # n[0]*m0+t[0]
- mov $Ni[1],%rax
- adc %rdx,$A0[1]
- mov $n0,$m1
- xor $A0[0],$A0[0]
- add 8($tptr,$j),$A0[1]
- adc \$0,$A0[0]
- mul $m0 # n[1]*m0
- add %rax,$A0[1] # n[1]*m0+t[1]
- mov $Ni[0],%rax
- adc %rdx,$A0[0]
- imulq $A0[1],$m1
- mov 16($nptr,$j),$Ni[0] # n[2]
- xor $A1[1],$A1[1]
- add $A0[1],$A1[0]
- adc \$0,$A1[1]
- mul $m1 # n[0]*m1
- add %rax,$A1[0] # n[0]*m1+"t[1]"
- mov $Ni[0],%rax
- adc %rdx,$A1[1]
- mov $A1[0],8($tptr,$j) # "t[1]"
- xor $A0[1],$A0[1]
- add 16($tptr,$j),$A0[0]
- adc \$0,$A0[1]
- mul $m0 # n[2]*m0
- add %rax,$A0[0] # n[2]*m0+t[2]
- mov $Ni[1],%rax
- adc %rdx,$A0[1]
- mov 24($nptr,$j),$Ni[1] # n[3]
- xor $A1[0],$A1[0]
- add $A0[0],$A1[1]
- adc \$0,$A1[0]
- mul $m1 # n[1]*m1
- add %rax,$A1[1] # n[1]*m1+"t[2]"
- mov $Ni[1],%rax
- adc %rdx,$A1[0]
- mov $A1[1],16($tptr,$j) # "t[2]"
- xor $A0[0],$A0[0]
- add 24($tptr,$j),$A0[1]
- lea 32($j),$j
- adc \$0,$A0[0]
- mul $m0 # n[3]*m0
- add %rax,$A0[1] # n[3]*m0+t[3]
- mov $Ni[0],%rax
- adc %rdx,$A0[0]
- jmp .Lsqr4x_mont_inner
- .align 16
- .Lsqr4x_mont_inner:
- mov ($nptr,$j),$Ni[0] # n[4]
- xor $A1[1],$A1[1]
- add $A0[1],$A1[0]
- adc \$0,$A1[1]
- mul $m1 # n[2]*m1
- add %rax,$A1[0] # n[2]*m1+"t[3]"
- mov $Ni[0],%rax
- adc %rdx,$A1[1]
- mov $A1[0],-8($tptr,$j) # "t[3]"
- xor $A0[1],$A0[1]
- add ($tptr,$j),$A0[0]
- adc \$0,$A0[1]
- mul $m0 # n[4]*m0
- add %rax,$A0[0] # n[4]*m0+t[4]
- mov $Ni[1],%rax
- adc %rdx,$A0[1]
- mov 8($nptr,$j),$Ni[1] # n[5]
- xor $A1[0],$A1[0]
- add $A0[0],$A1[1]
- adc \$0,$A1[0]
- mul $m1 # n[3]*m1
- add %rax,$A1[1] # n[3]*m1+"t[4]"
- mov $Ni[1],%rax
- adc %rdx,$A1[0]
- mov $A1[1],($tptr,$j) # "t[4]"
- xor $A0[0],$A0[0]
- add 8($tptr,$j),$A0[1]
- adc \$0,$A0[0]
- mul $m0 # n[5]*m0
- add %rax,$A0[1] # n[5]*m0+t[5]
- mov $Ni[0],%rax
- adc %rdx,$A0[0]
- mov 16($nptr,$j),$Ni[0] # n[6]
- xor $A1[1],$A1[1]
- add $A0[1],$A1[0]
- adc \$0,$A1[1]
- mul $m1 # n[4]*m1
- add %rax,$A1[0] # n[4]*m1+"t[5]"
- mov $Ni[0],%rax
- adc %rdx,$A1[1]
- mov $A1[0],8($tptr,$j) # "t[5]"
- xor $A0[1],$A0[1]
- add 16($tptr,$j),$A0[0]
- adc \$0,$A0[1]
- mul $m0 # n[6]*m0
- add %rax,$A0[0] # n[6]*m0+t[6]
- mov $Ni[1],%rax
- adc %rdx,$A0[1]
- mov 24($nptr,$j),$Ni[1] # n[7]
- xor $A1[0],$A1[0]
- add $A0[0],$A1[1]
- adc \$0,$A1[0]
- mul $m1 # n[5]*m1
- add %rax,$A1[1] # n[5]*m1+"t[6]"
- mov $Ni[1],%rax
- adc %rdx,$A1[0]
- mov $A1[1],16($tptr,$j) # "t[6]"
- xor $A0[0],$A0[0]
- add 24($tptr,$j),$A0[1]
- lea 32($j),$j
- adc \$0,$A0[0]
- mul $m0 # n[7]*m0
- add %rax,$A0[1] # n[7]*m0+t[7]
- mov $Ni[0],%rax
- adc %rdx,$A0[0]
- cmp \$0,$j
- jne .Lsqr4x_mont_inner
- sub 0(%rsp),$j # $j=-$num # modsched #
- mov $n0,$m0 # # modsched #
- xor $A1[1],$A1[1]
- add $A0[1],$A1[0]
- adc \$0,$A1[1]
- mul $m1 # n[6]*m1
- add %rax,$A1[0] # n[6]*m1+"t[7]"
- mov $Ni[1],%rax
- adc %rdx,$A1[1]
- mov $A1[0],-8($tptr) # "t[7]"
- xor $A0[1],$A0[1]
- add ($tptr),$A0[0] # +t[8]
- adc \$0,$A0[1]
- mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
- add $topbit,$A0[0]
- adc \$0,$A0[1]
- imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
- xor $A1[0],$A1[0]
- mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
- add $A0[0],$A1[1]
- mov 16($tptr,$j),$A0[0] # t[0] # modsched #
- adc \$0,$A1[0]
- mul $m1 # n[7]*m1
- add %rax,$A1[1] # n[7]*m1+"t[8]"
- mov $Ni[0],%rax # # modsched #
- adc %rdx,$A1[0]
- mov $A1[1],($tptr) # "t[8]"
- xor $topbit,$topbit
- add 8($tptr),$A1[0] # +t[9]
- adc $topbit,$topbit
- add $A0[1],$A1[0]
- lea 16($tptr),$tptr # "t[$num]>>128"
- adc \$0,$topbit
- mov $A1[0],-8($tptr) # "t[9]"
- cmp 8(%rsp),$tptr # are we done?
- jb .Lsqr4x_mont_outer
- mov 0(%rsp),$num # restore $num
- mov $topbit,($tptr) # save $topbit
- ___
- }
- ##############################################################
- # Post-condition, 4x unrolled copy from bn_mul_mont
- #
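- # Same masked tp/rp select and copy as in bn_mul_mont, except that
- # both halves of the t[2*num] scratch vector are zapped with %xmm0
- # stores on the way out.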
- {
- my ($tptr,$nptr)=("%rbx",$aptr);
- my @ri=("%rax","%rdx","%r10","%r11");
- $code.=<<___;
- mov 64(%rsp,$num),@ri[0] # tp[0]
- lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
- mov 40(%rsp),$nptr # restore $nptr
- shr \$5,$num # num/4
- mov 8($tptr),@ri[1] # t[1]
- xor $i,$i # i=0 and clear CF!
- mov 32(%rsp),$rptr # restore $rptr
- sub 0($nptr),@ri[0]
- mov 16($tptr),@ri[2] # t[2]
- mov 24($tptr),@ri[3] # t[3]
- sbb 8($nptr),@ri[1]
- lea -1($num),$j # j=num/4-1
- jmp .Lsqr4x_sub
- .align 16
- .Lsqr4x_sub:
- mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
- mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
- sbb 16($nptr,$i,8),@ri[2]
- mov 32($tptr,$i,8),@ri[0] # tp[i+1]
- mov 40($tptr,$i,8),@ri[1]
- sbb 24($nptr,$i,8),@ri[3]
- mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
- mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
- sbb 32($nptr,$i,8),@ri[0]
- mov 48($tptr,$i,8),@ri[2]
- mov 56($tptr,$i,8),@ri[3]
- sbb 40($nptr,$i,8),@ri[1]
- lea 4($i),$i # i++
- dec $j # doesn't affect CF!
- jnz .Lsqr4x_sub
- mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
- mov 32($tptr,$i,8),@ri[0] # load overflow bit
- sbb 16($nptr,$i,8),@ri[2]
- mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
- sbb 24($nptr,$i,8),@ri[3]
- mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
- sbb \$0,@ri[0] # handle upmost overflow bit
- mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
- xor $i,$i # i=0
- and @ri[0],$tptr
- not @ri[0]
- mov $rptr,$nptr
- and @ri[0],$nptr
- lea -1($num),$j
- or $nptr,$tptr # tp=borrow?tp:rp
- pxor %xmm0,%xmm0
- lea 64(%rsp,$num,8),$nptr
- movdqu ($tptr),%xmm1
- lea ($nptr,$num,8),$nptr
- movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
- movdqa %xmm0,($nptr) # zap upper half of temporary vector
- movdqu %xmm1,($rptr)
- jmp .Lsqr4x_copy
- .align 16
- .Lsqr4x_copy: # copy or in-place refresh
- movdqu 16($tptr,$i),%xmm2
- movdqu 32($tptr,$i),%xmm1
- movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
- movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
- movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
- movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
- movdqu %xmm2,16($rptr,$i)
- movdqu %xmm1,32($rptr,$i)
- lea 32($i),$i
- dec $j
- jnz .Lsqr4x_copy
- movdqu 16($tptr,$i),%xmm2
- movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
- movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
- movdqu %xmm2,16($rptr,$i)
- ___
- }
- $code.=<<___;
- mov 56(%rsp),%rsi # restore %rsp
- mov \$1,%rax
- mov 0(%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
- .Lsqr4x_epilogue:
- ret
- .size bn_sqr4x_mont,.-bn_sqr4x_mont
- ___
- }}}
- $code.=<<___;
- .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
- .align 16
- ___
- # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
- # CONTEXT *context,DISPATCHER_CONTEXT *disp)
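- # Both handlers recover the %rsp word saved by the prologue (at
- # tp[num+1] for mul/mul4x, at frame offset 56 for sqr4x), restore
- # the six non-volatile GPRs stashed just below it, and return
- # ExceptionContinueSearch via the common tail.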
- if ($win64) {
- $rec="%rcx";
- $frame="%rdx";
- $context="%r8";
- $disp="%r9";
- $code.=<<___;
- .extern __imp_RtlVirtualUnwind
- .type mul_handler,\@abi-omnipotent
- .align 16
- mul_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # end of prologue label
- cmp %r10,%rbx # context->Rip<end of prologue label
- jb .Lcommon_seh_tail
- mov 152($context),%rax # pull context->Rsp
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lcommon_seh_tail
- mov 192($context),%r10 # pull $num
- mov 8(%rax,%r10,8),%rax # pull saved stack pointer
- lea 48(%rax),%rax
- mov -8(%rax),%rbx
- mov -16(%rax),%rbp
- mov -24(%rax),%r12
- mov -32(%rax),%r13
- mov -40(%rax),%r14
- mov -48(%rax),%r15
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
- jmp .Lcommon_seh_tail
- .size mul_handler,.-mul_handler
- .type sqr_handler,\@abi-omnipotent
- .align 16
- sqr_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
- lea .Lsqr4x_body(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lsqr4x_body
- jb .Lcommon_seh_tail
- mov 152($context),%rax # pull context->Rsp
- lea .Lsqr4x_epilogue(%rip),%r10
- cmp %r10,%rbx # context->Rip>=.Lsqr4x_epilogue
- jae .Lcommon_seh_tail
- mov 56(%rax),%rax # pull saved stack pointer
- lea 48(%rax),%rax
- mov -8(%rax),%rbx
- mov -16(%rax),%rbp
- mov -24(%rax),%r12
- mov -32(%rax),%r13
- mov -40(%rax),%r14
- mov -48(%rax),%r15
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
- .Lcommon_seh_tail:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$154,%ecx # sizeof(CONTEXT)/8
- .long 0xa548f3fc # cld; rep movsq
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
- .size sqr_handler,.-sqr_handler
- .section .pdata
- .align 4
- .rva .LSEH_begin_bn_mul_mont
- .rva .LSEH_end_bn_mul_mont
- .rva .LSEH_info_bn_mul_mont
- .rva .LSEH_begin_bn_mul4x_mont
- .rva .LSEH_end_bn_mul4x_mont
- .rva .LSEH_info_bn_mul4x_mont
- .rva .LSEH_begin_bn_sqr4x_mont
- .rva .LSEH_end_bn_sqr4x_mont
- .rva .LSEH_info_bn_sqr4x_mont
- .section .xdata
- .align 8
- .LSEH_info_bn_mul_mont:
- .byte 9,0,0,0
- .rva mul_handler
- .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
- .LSEH_info_bn_mul4x_mont:
- .byte 9,0,0,0
- .rva mul_handler
- .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
- .LSEH_info_bn_sqr4x_mont:
- .byte 9,0,0,0
- .rva sqr_handler
- ___
- }
- print $code;
- close STDOUT;