- #! /usr/bin/env perl
- # Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
- #
- # Licensed under the Apache License 2.0 (the "License"). You may not use
- # this file except in compliance with the License. You can obtain a copy
- # in the file LICENSE in the source distribution or at
- # https://www.openssl.org/source/license.html
- #
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # ECP_NISTZ256 module for PPC64.
- #
- # August 2016.
- #
- # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
- # http://eprint.iacr.org/2013/816.
- #
- # with -DECP_NISTZ256_ASM vs. without (improvement)
- # POWER7 +260-530%
- # POWER8 +220-340%
- # $output is the last argument if it looks like a file (it has an extension)
- # $flavour is the first argument if it doesn't look like a file
- $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
- $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
- die "can't locate ppc-xlate.pl";
- open OUT,"| \"$^X\" $xlate $flavour \"$output\""
- or die "can't call $xlate: $!";
- *STDOUT=*OUT;
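- #
- # The code above implements the usual perlasm calling convention: the
- # build system passes a "flavour" string and (optionally) an output path,
- # and everything this script prints is piped through ppc-xlate.pl.  An
- # illustrative manual invocation (file name and flavour are examples, not
- # anything this script mandates):
- #
- #	perl ecp_nistz256-ppc64.pl linux64le ecp_nistz256-ppc64.s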
- my $sp="r1";
- {
- my ($rp,$ap,$bp,$bi,$acc0,$acc1,$acc2,$acc3,$poly1,$poly3,
- $acc4,$acc5,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3) =
- map("r$_",(3..12,22..31));
- my ($acc6,$acc7)=($bp,$bi); # used in __ecp_nistz256_sqr_mont
- $code.=<<___;
- .machine "any"
- .text
- ___
- ########################################################################
- # Convert ecp_nistz256_table.c to layout expected by ecp_nistz256_gather_w7
- #
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- open TABLE,"<ecp_nistz256_table.c" or
- open TABLE,"<${dir}../ecp_nistz256_table.c" or
- die "failed to open ecp_nistz256_table.c:",$!;
- use integer;
- foreach(<TABLE>) {
- s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
- }
- close TABLE;
- # See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
- # 64*16*37-1 is because $#arr returns the last valid index of @arr, not
- # the number of elements.
- die "insane number of elements" if ($#arr != 64*16*37-1);
- $code.=<<___;
- .type ecp_nistz256_precomputed,\@object
- .globl ecp_nistz256_precomputed
- .align 12
- ecp_nistz256_precomputed:
- ___
- ########################################################################
- # this conversion scatters each P256_POINT_AFFINE into individual bytes
- # at a 64-byte stride, i.e. it transposes the table, similar to
- # 1111222233334444
- # 1234123412341234
- for(1..37) {
- @tbl = splice(@arr,0,64*16);
- for($i=0;$i<64;$i++) {
- undef @line;
- for($j=0;$j<64;$j++) {
- push @line,($tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
- }
- $code.=".byte\t";
- $code.=join(',',map { sprintf "0x%02x",$_} @line);
- $code.="\n";
- }
- }
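- # In the transposed layout emitted above, byte b (0..63) of point k (0..63)
- # within a 4096-byte block lives at offset b*64 + k, which is what lets
- # ecp_nistz256_gather_w7 below fetch one point with byte loads at a fixed
- # 64-byte stride.  A commented-out, illustrative Perl sketch of that gather
- # (the sub name is hypothetical and nothing calls it):
- #
- #	sub demo_gather_w7 {
- #	    my ($block, $k) = @_;	# $block: 4096-byte string, $k: 0..63
- #	    my $point = '';
- #	    $point .= substr($block, $_*64 + $k, 1) for (0..63);
- #	    return $point;		# 64 bytes: X followed by Y
- #	}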
- $code.=<<___;
- .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
- .asciz "ECP_NISTZ256 for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
- # void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
- # const BN_ULONG x2[4]);
- .globl ecp_nistz256_mul_mont
- .align 5
- ecp_nistz256_mul_mont:
- stdu $sp,-128($sp)
- mflr r0
- std r22,48($sp)
- std r23,56($sp)
- std r24,64($sp)
- std r25,72($sp)
- std r26,80($sp)
- std r27,88($sp)
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $a0,0($ap)
- ld $bi,0($bp)
- ld $a1,8($ap)
- ld $a2,16($ap)
- ld $a3,24($ap)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_mul_mont
- mtlr r0
- ld r22,48($sp)
- ld r23,56($sp)
- ld r24,64($sp)
- ld r25,72($sp)
- ld r26,80($sp)
- ld r27,88($sp)
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,10,3,0
- .long 0
- .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
- # void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_sqr_mont
- .align 4
- ecp_nistz256_sqr_mont:
- stdu $sp,-128($sp)
- mflr r0
- std r22,48($sp)
- std r23,56($sp)
- std r24,64($sp)
- std r25,72($sp)
- std r26,80($sp)
- std r27,88($sp)
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $a0,0($ap)
- ld $a1,8($ap)
- ld $a2,16($ap)
- ld $a3,24($ap)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_sqr_mont
- mtlr r0
- ld r22,48($sp)
- ld r23,56($sp)
- ld r24,64($sp)
- ld r25,72($sp)
- ld r26,80($sp)
- ld r27,88($sp)
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,10,2,0
- .long 0
- .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
- # void ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
- # const BN_ULONG x2[4]);
- .globl ecp_nistz256_add
- .align 4
- ecp_nistz256_add:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $acc0,0($ap)
- ld $t0, 0($bp)
- ld $acc1,8($ap)
- ld $t1, 8($bp)
- ld $acc2,16($ap)
- ld $t2, 16($bp)
- ld $acc3,24($ap)
- ld $t3, 24($bp)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_add
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,3,0
- .long 0
- .size ecp_nistz256_add,.-ecp_nistz256_add
- # void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_div_by_2
- .align 4
- ecp_nistz256_div_by_2:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $acc0,0($ap)
- ld $acc1,8($ap)
- ld $acc2,16($ap)
- ld $acc3,24($ap)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_div_by_2
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,2,0
- .long 0
- .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
- # void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_mul_by_2
- .align 4
- ecp_nistz256_mul_by_2:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $acc0,0($ap)
- ld $acc1,8($ap)
- ld $acc2,16($ap)
- ld $acc3,24($ap)
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_add # ret = a+a // 2*a
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,3,0
- .long 0
- .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
- # void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_mul_by_3
- .align 4
- ecp_nistz256_mul_by_3:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $acc0,0($ap)
- ld $acc1,8($ap)
- ld $acc2,16($ap)
- ld $acc3,24($ap)
- mr $t0,$acc0
- std $acc0,64($sp)
- mr $t1,$acc1
- std $acc1,72($sp)
- mr $t2,$acc2
- std $acc2,80($sp)
- mr $t3,$acc3
- std $acc3,88($sp)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_add # ret = a+a // 2*a
- ld $t0,64($sp)
- ld $t1,72($sp)
- ld $t2,80($sp)
- ld $t3,88($sp)
- bl __ecp_nistz256_add # ret += a // 2*a+a=3*a
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,2,0
- .long 0
- .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
- # void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
- # const BN_ULONG x2[4]);
- .globl ecp_nistz256_sub
- .align 4
- ecp_nistz256_sub:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- ld $acc0,0($ap)
- ld $acc1,8($ap)
- ld $acc2,16($ap)
- ld $acc3,24($ap)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_sub_from
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,3,0
- .long 0
- .size ecp_nistz256_sub,.-ecp_nistz256_sub
- # void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
- .globl ecp_nistz256_neg
- .align 4
- ecp_nistz256_neg:
- stdu $sp,-128($sp)
- mflr r0
- std r28,96($sp)
- std r29,104($sp)
- std r30,112($sp)
- std r31,120($sp)
- mr $bp,$ap
- li $acc0,0
- li $acc1,0
- li $acc2,0
- li $acc3,0
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- bl __ecp_nistz256_sub_from
- mtlr r0
- ld r28,96($sp)
- ld r29,104($sp)
- ld r30,112($sp)
- ld r31,120($sp)
- addi $sp,$sp,128
- blr
- .long 0
- .byte 0,12,4,0,0x80,4,2,0
- .long 0
- .size ecp_nistz256_neg,.-ecp_nistz256_neg
- # note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
- # to $a0-$a3 and b[0] to $bi
- .type __ecp_nistz256_mul_mont,\@function
- .align 4
- __ecp_nistz256_mul_mont:
- mulld $acc0,$a0,$bi # a[0]*b[0]
- mulhdu $t0,$a0,$bi
- mulld $acc1,$a1,$bi # a[1]*b[0]
- mulhdu $t1,$a1,$bi
- mulld $acc2,$a2,$bi # a[2]*b[0]
- mulhdu $t2,$a2,$bi
- mulld $acc3,$a3,$bi # a[3]*b[0]
- mulhdu $t3,$a3,$bi
- ld $bi,8($bp) # b[1]
- addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
- sldi $t0,$acc0,32
- adde $acc2,$acc2,$t1
- srdi $t1,$acc0,32
- adde $acc3,$acc3,$t2
- addze $acc4,$t3
- li $acc5,0
- ___
- for($i=1;$i<4;$i++) {
- ################################################################
- # A reduction iteration is normally performed by accumulating the
- # result of multiplying the modulus by the "magic" digit [and
- # omitting the least significant word, which is guaranteed to
- # be 0], but thanks to the special form of the modulus, with the
- # "magic" digit being equal to the least significant word, it can
- # be performed with additions and subtractions alone. Indeed:
- #
- # ffff0001.00000000.0000ffff.ffffffff
- # * abcdefgh
- # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
- #
- # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
- # rewrite the above as:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
- # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
- # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
- #
- # or marking redundant operations:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
- # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
- # - 0000abcd.efgh0000.--------.--------.--------
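- #
- # Two facts make this work: the modulus is
- # p = 2^256 - 2^224 + 2^192 + 2^96 - 1, so multiplying it by a 64-bit
- # digit needs only shifted copies of that digit; and p == -1 (mod 2^64),
- # so -p^-1 mod 2^64 == 1 and the "magic" digit is simply the low
- # accumulator limb itself.  A commented-out Math::BigInt sketch checking
- # the identity (illustrative only, never executed; $m stands for the
- # digit abcdefgh):
- #
- #	use Math::BigInt;
- #	my $two = Math::BigInt->new(2);
- #	my $p   = $two**256 - $two**224 + $two**192 + $two**96 - 1;
- #	my $m   = Math::BigInt->new("0x0123456789abcdef");
- #	die "identity broken"
- #	    unless $p*$m == ($m<<256) - ($m<<224) + ($m<<192) + ($m<<96) - $m;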
- $code.=<<___;
- subfc $t2,$t0,$acc0 # "*0xffff0001"
- subfe $t3,$t1,$acc0
- addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
- adde $acc1,$acc2,$t1
- adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
- adde $acc3,$acc4,$t3
- addze $acc4,$acc5
- mulld $t0,$a0,$bi # lo(a[0]*b[i])
- mulld $t1,$a1,$bi # lo(a[1]*b[i])
- mulld $t2,$a2,$bi # lo(a[2]*b[i])
- mulld $t3,$a3,$bi # lo(a[3]*b[i])
- addc $acc0,$acc0,$t0 # accumulate low parts of multiplication
- mulhdu $t0,$a0,$bi # hi(a[0]*b[i])
- adde $acc1,$acc1,$t1
- mulhdu $t1,$a1,$bi # hi(a[1]*b[i])
- adde $acc2,$acc2,$t2
- mulhdu $t2,$a2,$bi # hi(a[2]*b[i])
- adde $acc3,$acc3,$t3
- mulhdu $t3,$a3,$bi # hi(a[3]*b[i])
- addze $acc4,$acc4
- ___
- $code.=<<___ if ($i<3);
- ld $bi,8*($i+1)($bp) # b[$i+1]
- ___
- $code.=<<___;
- addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
- sldi $t0,$acc0,32
- adde $acc2,$acc2,$t1
- srdi $t1,$acc0,32
- adde $acc3,$acc3,$t2
- adde $acc4,$acc4,$t3
- li $acc5,0
- addze $acc5,$acc5
- ___
- }
- $code.=<<___;
- # last reduction
- subfc $t2,$t0,$acc0 # "*0xffff0001"
- subfe $t3,$t1,$acc0
- addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
- adde $acc1,$acc2,$t1
- adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
- adde $acc3,$acc4,$t3
- addze $acc4,$acc5
- li $t2,0
- addic $acc0,$acc0,1 # ret -= modulus
- subfe $acc1,$poly1,$acc1
- subfe $acc2,$t2,$acc2
- subfe $acc3,$poly3,$acc3
- subfe $acc4,$t2,$acc4
- addc $acc0,$acc0,$acc4 # ret += modulus if borrow
- and $t1,$poly1,$acc4
- and $t3,$poly3,$acc4
- adde $acc1,$acc1,$t1
- addze $acc2,$acc2
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,1,0
- .long 0
- .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
- # note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
- # to $a0-$a3
- .type __ecp_nistz256_sqr_mont,\@function
- .align 4
- __ecp_nistz256_sqr_mont:
- ################################################################
- # | | | | | |a1*a0| |
- # | | | | |a2*a0| | |
- # | |a3*a2|a3*a0| | | |
- # | | | |a2*a1| | | |
- # | | |a3*a1| | | | |
- # *| | | | | | | | 2|
- # +|a3*a3|a2*a2|a1*a1|a0*a0|
- # |--+--+--+--+--+--+--+--|
- # |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
- #
- # "can't overflow" below mark carrying into high part of
- # multiplication result, which can't overflow, because it
- # can never be all ones.
- mulld $acc1,$a1,$a0 # a[1]*a[0]
- mulhdu $t1,$a1,$a0
- mulld $acc2,$a2,$a0 # a[2]*a[0]
- mulhdu $t2,$a2,$a0
- mulld $acc3,$a3,$a0 # a[3]*a[0]
- mulhdu $acc4,$a3,$a0
- addc $acc2,$acc2,$t1 # accumulate high parts of multiplication
- mulld $t0,$a2,$a1 # a[2]*a[1]
- mulhdu $t1,$a2,$a1
- adde $acc3,$acc3,$t2
- mulld $t2,$a3,$a1 # a[3]*a[1]
- mulhdu $t3,$a3,$a1
- addze $acc4,$acc4 # can't overflow
- mulld $acc5,$a3,$a2 # a[3]*a[2]
- mulhdu $acc6,$a3,$a2
- addc $t1,$t1,$t2 # accumulate high parts of multiplication
- addze $t2,$t3 # can't overflow
- addc $acc3,$acc3,$t0 # accumulate low parts of multiplication
- adde $acc4,$acc4,$t1
- adde $acc5,$acc5,$t2
- addze $acc6,$acc6 # can't overflow
- addc $acc1,$acc1,$acc1 # acc[1-6]*=2
- adde $acc2,$acc2,$acc2
- adde $acc3,$acc3,$acc3
- adde $acc4,$acc4,$acc4
- adde $acc5,$acc5,$acc5
- adde $acc6,$acc6,$acc6
- li $acc7,0
- addze $acc7,$acc7
- mulld $acc0,$a0,$a0 # a[0]*a[0]
- mulhdu $a0,$a0,$a0
- mulld $t1,$a1,$a1 # a[1]*a[1]
- mulhdu $a1,$a1,$a1
- mulld $t2,$a2,$a2 # a[2]*a[2]
- mulhdu $a2,$a2,$a2
- mulld $t3,$a3,$a3 # a[3]*a[3]
- mulhdu $a3,$a3,$a3
- addc $acc1,$acc1,$a0 # +a[i]*a[i]
- sldi $t0,$acc0,32
- adde $acc2,$acc2,$t1
- srdi $t1,$acc0,32
- adde $acc3,$acc3,$a1
- adde $acc4,$acc4,$t2
- adde $acc5,$acc5,$a2
- adde $acc6,$acc6,$t3
- adde $acc7,$acc7,$a3
- ___
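- # The schedule above computes the six cross products a[i]*a[j] (i<j) once,
- # doubles the whole accumulator, and only then adds the four squares
- # a[i]^2, i.e. it relies on
- # A^2 = sum a[i]^2*2^(128*i) + 2*sum_{i<j} a[i]*a[j]*2^(64*(i+j)).
- # A commented-out Math::BigInt check of that decomposition (illustrative
- # only; the limb values are arbitrary):
- #
- #	use Math::BigInt;
- #	my @a = map { Math::BigInt->new($_) } qw(0x1111 0x2222 0x3333 0x4444);
- #	my $A = $a[0] + ($a[1]<<64) + ($a[2]<<128) + ($a[3]<<192);
- #	my ($cross, $squares) = (Math::BigInt->new(0), Math::BigInt->new(0));
- #	for my $i (0..3) {
- #	    $squares += ($a[$i]*$a[$i]) << (128*$i);
- #	    for my $j ($i+1..3) {
- #	        $cross += ($a[$i]*$a[$j]) << (64*($i+$j));
- #	    }
- #	}
- #	die "squaring schedule broken" unless $A*$A == $squares + 2*$cross;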
- for($i=0;$i<3;$i++) { # reductions, see commentary in
- # multiplication for details
- $code.=<<___;
- subfc $t2,$t0,$acc0 # "*0xffff0001"
- subfe $t3,$t1,$acc0
- addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
- sldi $t0,$acc0,32
- adde $acc1,$acc2,$t1
- srdi $t1,$acc0,32
- adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
- addze $acc3,$t3 # can't overflow
- ___
- }
- $code.=<<___;
- subfc $t2,$t0,$acc0 # "*0xffff0001"
- subfe $t3,$t1,$acc0
- addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
- adde $acc1,$acc2,$t1
- adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
- addze $acc3,$t3 # can't overflow
- addc $acc0,$acc0,$acc4 # accumulate upper half
- adde $acc1,$acc1,$acc5
- adde $acc2,$acc2,$acc6
- adde $acc3,$acc3,$acc7
- li $t2,0
- addze $acc4,$t2
- addic $acc0,$acc0,1 # ret -= modulus
- subfe $acc1,$poly1,$acc1
- subfe $acc2,$t2,$acc2
- subfe $acc3,$poly3,$acc3
- subfe $acc4,$t2,$acc4
- addc $acc0,$acc0,$acc4 # ret += modulus if borrow
- and $t1,$poly1,$acc4
- and $t3,$poly3,$acc4
- adde $acc1,$acc1,$t1
- addze $acc2,$acc2
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,1,0
- .long 0
- .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
- # Note that __ecp_nistz256_add expects both input vectors pre-loaded to
- # $acc0-$acc3 and $t0-$t3. This is done because it's used in multiple
- # contexts, e.g. in multiplication by 2 and 3...
- .type __ecp_nistz256_add,\@function
- .align 4
- __ecp_nistz256_add:
- addc $acc0,$acc0,$t0 # ret = a+b
- adde $acc1,$acc1,$t1
- adde $acc2,$acc2,$t2
- li $t2,0
- adde $acc3,$acc3,$t3
- addze $t0,$t2
- # if a+b >= modulus, subtract modulus
- #
- # But since comparison implies subtraction, we subtract
- # modulus and then add it back if subtraction borrowed.
- subic $acc0,$acc0,-1
- subfe $acc1,$poly1,$acc1
- subfe $acc2,$t2,$acc2
- subfe $acc3,$poly3,$acc3
- subfe $t0,$t2,$t0
- addc $acc0,$acc0,$t0
- and $t1,$poly1,$t0
- and $t3,$poly3,$t0
- adde $acc1,$acc1,$t1
- addze $acc2,$acc2
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size __ecp_nistz256_add,.-__ecp_nistz256_add
- .type __ecp_nistz256_sub_from,\@function
- .align 4
- __ecp_nistz256_sub_from:
- ld $t0,0($bp)
- ld $t1,8($bp)
- ld $t2,16($bp)
- ld $t3,24($bp)
- subfc $acc0,$t0,$acc0 # ret = a-b
- subfe $acc1,$t1,$acc1
- subfe $acc2,$t2,$acc2
- subfe $acc3,$t3,$acc3
- subfe $t0,$t0,$t0 # t0 = borrow ? -1 : 0
- # if a-b borrowed, add modulus
- addc $acc0,$acc0,$t0 # ret -= modulus & t0
- and $t1,$poly1,$t0
- and $t3,$poly3,$t0
- adde $acc1,$acc1,$t1
- addze $acc2,$acc2
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
- .type __ecp_nistz256_sub_morf,\@function
- .align 4
- __ecp_nistz256_sub_morf:
- ld $t0,0($bp)
- ld $t1,8($bp)
- ld $t2,16($bp)
- ld $t3,24($bp)
- subfc $acc0,$acc0,$t0 # ret = b-a
- subfe $acc1,$acc1,$t1
- subfe $acc2,$acc2,$t2
- subfe $acc3,$acc3,$t3
- subfe $t0,$t0,$t0 # t0 = borrow ? -1 : 0
- # if b-a borrowed, add modulus
- addc $acc0,$acc0,$t0 # ret -= modulus & t0
- and $t1,$poly1,$t0
- and $t3,$poly3,$t0
- adde $acc1,$acc1,$t1
- addze $acc2,$acc2
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
- .type __ecp_nistz256_div_by_2,\@function
- .align 4
- __ecp_nistz256_div_by_2:
- andi. $t0,$acc0,1
- addic $acc0,$acc0,-1 # a += modulus
- neg $t0,$t0
- adde $acc1,$acc1,$poly1
- not $t0,$t0
- addze $acc2,$acc2
- li $t2,0
- adde $acc3,$acc3,$poly3
- and $t1,$poly1,$t0
- addze $ap,$t2 # ap = carry
- and $t3,$poly3,$t0
- subfc $acc0,$t0,$acc0 # a -= modulus if a was even
- subfe $acc1,$t1,$acc1
- subfe $acc2,$t2,$acc2
- subfe $acc3,$t3,$acc3
- subfe $ap, $t2,$ap
- srdi $acc0,$acc0,1
- sldi $t0,$acc1,63
- srdi $acc1,$acc1,1
- sldi $t1,$acc2,63
- srdi $acc2,$acc2,1
- sldi $t2,$acc3,63
- srdi $acc3,$acc3,1
- sldi $t3,$ap,63
- or $acc0,$acc0,$t0
- or $acc1,$acc1,$t1
- or $acc2,$acc2,$t2
- or $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,1,0
- .long 0
- .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
- ___
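- #
- # __ecp_nistz256_div_by_2 above (and the add/sub helpers before it) all
- # use the same trick: apply the modulus speculatively and patch the result
- # up from the resulting carry/borrow instead of comparing first.  For the
- # halving itself: if the input is odd, add p (keeping the carry as a 257th
- # bit), then shift the whole thing right by one.  A commented-out
- # Math::BigInt sketch of that computation (illustrative only; $a is any
- # residue 0 <= $a < $p):
- #
- #	use Math::BigInt;
- #	my $two = Math::BigInt->new(2);
- #	my $p   = $two**256 - $two**224 + $two**192 + $two**96 - 1;
- #	sub demo_div_by_2 {
- #	    my ($a, $p) = @_;
- #	    $a = $a + $p if $a->is_odd();	# now even, may be 257 bits wide
- #	    return $a >> 1;
- #	}
- #	# check: 2*demo_div_by_2($a, $p) == $a (mod $p)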
- ########################################################################
- # the following subroutines are "literal" implementations of those
- # found in ecp_nistz256.c
- #
- ########################################################################
- # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
- #
- if (1) {
- my $FRAME=64+32*4+12*8;
- my ($S,$M,$Zsqr,$tmp0)=map(64+32*$_,(0..3));
- # above map() describes stack layout with 4 temporary
- # 256-bit vectors on top.
- my ($rp_real,$ap_real) = map("r$_",(20,21));
- $code.=<<___;
- .globl ecp_nistz256_point_double
- .align 5
- ecp_nistz256_point_double:
- stdu $sp,-$FRAME($sp)
- mflr r0
- std r20,$FRAME-8*12($sp)
- std r21,$FRAME-8*11($sp)
- std r22,$FRAME-8*10($sp)
- std r23,$FRAME-8*9($sp)
- std r24,$FRAME-8*8($sp)
- std r25,$FRAME-8*7($sp)
- std r26,$FRAME-8*6($sp)
- std r27,$FRAME-8*5($sp)
- std r28,$FRAME-8*4($sp)
- std r29,$FRAME-8*3($sp)
- std r30,$FRAME-8*2($sp)
- std r31,$FRAME-8*1($sp)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- .Ldouble_shortcut:
- ld $acc0,32($ap)
- ld $acc1,40($ap)
- ld $acc2,48($ap)
- ld $acc3,56($ap)
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- ld $a0,64($ap) # forward load for p256_sqr_mont
- ld $a1,72($ap)
- ld $a2,80($ap)
- ld $a3,88($ap)
- mr $rp_real,$rp
- mr $ap_real,$ap
- addi $rp,$sp,$S
- bl __ecp_nistz256_add # p256_mul_by_2(S, in_y);
- addi $rp,$sp,$Zsqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Zsqr, in_z);
- ld $t0,0($ap_real)
- ld $t1,8($ap_real)
- ld $t2,16($ap_real)
- ld $t3,24($ap_real)
- mr $a0,$acc0 # put Zsqr aside for p256_sub
- mr $a1,$acc1
- mr $a2,$acc2
- mr $a3,$acc3
- addi $rp,$sp,$M
- bl __ecp_nistz256_add # p256_add(M, Zsqr, in_x);
- addi $bp,$ap_real,0
- mr $acc0,$a0 # restore Zsqr
- mr $acc1,$a1
- mr $acc2,$a2
- mr $acc3,$a3
- ld $a0,$S+0($sp) # forward load for p256_sqr_mont
- ld $a1,$S+8($sp)
- ld $a2,$S+16($sp)
- ld $a3,$S+24($sp)
- addi $rp,$sp,$Zsqr
- bl __ecp_nistz256_sub_morf # p256_sub(Zsqr, in_x, Zsqr);
- addi $rp,$sp,$S
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(S, S);
- ld $bi,32($ap_real)
- ld $a0,64($ap_real)
- ld $a1,72($ap_real)
- ld $a2,80($ap_real)
- ld $a3,88($ap_real)
- addi $bp,$ap_real,32
- addi $rp,$sp,$tmp0
- bl __ecp_nistz256_mul_mont # p256_mul_mont(tmp0, in_z, in_y);
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- ld $a0,$S+0($sp) # forward load for p256_sqr_mont
- ld $a1,$S+8($sp)
- ld $a2,$S+16($sp)
- ld $a3,$S+24($sp)
- addi $rp,$rp_real,64
- bl __ecp_nistz256_add # p256_mul_by_2(res_z, tmp0);
- addi $rp,$sp,$tmp0
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(tmp0, S);
- ld $bi,$Zsqr($sp) # forward load for p256_mul_mont
- ld $a0,$M+0($sp)
- ld $a1,$M+8($sp)
- ld $a2,$M+16($sp)
- ld $a3,$M+24($sp)
- addi $rp,$rp_real,32
- bl __ecp_nistz256_div_by_2 # p256_div_by_2(res_y, tmp0);
- addi $bp,$sp,$Zsqr
- addi $rp,$sp,$M
- bl __ecp_nistz256_mul_mont # p256_mul_mont(M, M, Zsqr);
- mr $t0,$acc0 # duplicate M
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- mr $a0,$acc0 # put M aside
- mr $a1,$acc1
- mr $a2,$acc2
- mr $a3,$acc3
- addi $rp,$sp,$M
- bl __ecp_nistz256_add
- mr $t0,$a0 # restore M
- mr $t1,$a1
- mr $t2,$a2
- mr $t3,$a3
- ld $bi,0($ap_real) # forward load for p256_mul_mont
- ld $a0,$S+0($sp)
- ld $a1,$S+8($sp)
- ld $a2,$S+16($sp)
- ld $a3,$S+24($sp)
- bl __ecp_nistz256_add # p256_mul_by_3(M, M);
- addi $bp,$ap_real,0
- addi $rp,$sp,$S
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S, S, in_x);
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- ld $a0,$M+0($sp) # forward load for p256_sqr_mont
- ld $a1,$M+8($sp)
- ld $a2,$M+16($sp)
- ld $a3,$M+24($sp)
- addi $rp,$sp,$tmp0
- bl __ecp_nistz256_add # p256_mul_by_2(tmp0, S);
- addi $rp,$rp_real,0
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(res_x, M);
- addi $bp,$sp,$tmp0
- bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, tmp0);
- addi $bp,$sp,$S
- addi $rp,$sp,$S
- bl __ecp_nistz256_sub_morf # p256_sub(S, S, res_x);
- ld $bi,$M($sp)
- mr $a0,$acc0 # copy S
- mr $a1,$acc1
- mr $a2,$acc2
- mr $a3,$acc3
- addi $bp,$sp,$M
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S, S, M);
- addi $bp,$rp_real,32
- addi $rp,$rp_real,32
- bl __ecp_nistz256_sub_from # p256_sub(res_y, S, res_y);
- mtlr r0
- ld r20,$FRAME-8*12($sp)
- ld r21,$FRAME-8*11($sp)
- ld r22,$FRAME-8*10($sp)
- ld r23,$FRAME-8*9($sp)
- ld r24,$FRAME-8*8($sp)
- ld r25,$FRAME-8*7($sp)
- ld r26,$FRAME-8*6($sp)
- ld r27,$FRAME-8*5($sp)
- ld r28,$FRAME-8*4($sp)
- ld r29,$FRAME-8*3($sp)
- ld r30,$FRAME-8*2($sp)
- ld r31,$FRAME-8*1($sp)
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,0,0x80,12,2,0
- .long 0
- .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
- ___
- }
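- # The call sequence above is standard Jacobian doubling for a curve with
- # a = -3, which P-256 is.  Collected in one place as a commented-out
- # Math::BigInt sketch, with all values in ordinary (non-Montgomery) form;
- # the sub is illustrative only and never called:
- #
- #	sub demo_point_double {			# ($x,$y,$z) Jacobian, mod $p
- #	    my ($x, $y, $z, $p) = @_;
- #	    my $S  = (4 * $x * $y**2) % $p;		# S = 4*X*Y^2
- #	    my $M  = (3 * ($x**2 - $z**4)) % $p;	# M = 3*(X-Z^2)*(X+Z^2)
- #	    my $x3 = ($M**2 - 2*$S) % $p;		# res_x = M^2 - 2*S
- #	    my $y3 = ($M*($S - $x3) - 8*$y**4) % $p;	# res_y = M*(S-res_x) - 8*Y^4
- #	    my $z3 = (2 * $y * $z) % $p;		# res_z = 2*Y*Z
- #	    return ($x3, $y3, $z3);
- #	}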
- ########################################################################
- # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
- # const P256_POINT *in2);
- if (1) {
- my $FRAME = 64 + 32*12 + 16*8;
- my ($res_x,$res_y,$res_z,
- $H,$Hsqr,$R,$Rsqr,$Hcub,
- $U1,$U2,$S1,$S2)=map(64+32*$_,(0..11));
- my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
- # above map() describes stack layout with 12 temporary
- # 256-bit vectors on top.
- my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
- $code.=<<___;
- .globl ecp_nistz256_point_add
- .align 5
- ecp_nistz256_point_add:
- stdu $sp,-$FRAME($sp)
- mflr r0
- std r16,$FRAME-8*16($sp)
- std r17,$FRAME-8*15($sp)
- std r18,$FRAME-8*14($sp)
- std r19,$FRAME-8*13($sp)
- std r20,$FRAME-8*12($sp)
- std r21,$FRAME-8*11($sp)
- std r22,$FRAME-8*10($sp)
- std r23,$FRAME-8*9($sp)
- std r24,$FRAME-8*8($sp)
- std r25,$FRAME-8*7($sp)
- std r26,$FRAME-8*6($sp)
- std r27,$FRAME-8*5($sp)
- std r28,$FRAME-8*4($sp)
- std r29,$FRAME-8*3($sp)
- std r30,$FRAME-8*2($sp)
- std r31,$FRAME-8*1($sp)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- ld $a0,64($bp) # in2_z
- ld $a1,72($bp)
- ld $a2,80($bp)
- ld $a3,88($bp)
- mr $rp_real,$rp
- mr $ap_real,$ap
- mr $bp_real,$bp
- or $t0,$a0,$a1
- or $t2,$a2,$a3
- or $in2infty,$t0,$t2
- neg $t0,$in2infty
- or $in2infty,$in2infty,$t0
- sradi $in2infty,$in2infty,63 # !in2infty
- addi $rp,$sp,$Z2sqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z2sqr, in2_z);
- ld $a0,64($ap_real) # in1_z
- ld $a1,72($ap_real)
- ld $a2,80($ap_real)
- ld $a3,88($ap_real)
- or $t0,$a0,$a1
- or $t2,$a2,$a3
- or $in1infty,$t0,$t2
- neg $t0,$in1infty
- or $in1infty,$in1infty,$t0
- sradi $in1infty,$in1infty,63 # !in1infty
- addi $rp,$sp,$Z1sqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
- ld $bi,64($bp_real)
- ld $a0,$Z2sqr+0($sp)
- ld $a1,$Z2sqr+8($sp)
- ld $a2,$Z2sqr+16($sp)
- ld $a3,$Z2sqr+24($sp)
- addi $bp,$bp_real,64
- addi $rp,$sp,$S1
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S1, Z2sqr, in2_z);
- ld $bi,64($ap_real)
- ld $a0,$Z1sqr+0($sp)
- ld $a1,$Z1sqr+8($sp)
- ld $a2,$Z1sqr+16($sp)
- ld $a3,$Z1sqr+24($sp)
- addi $bp,$ap_real,64
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, Z1sqr, in1_z);
- ld $bi,32($ap_real)
- ld $a0,$S1+0($sp)
- ld $a1,$S1+8($sp)
- ld $a2,$S1+16($sp)
- ld $a3,$S1+24($sp)
- addi $bp,$ap_real,32
- addi $rp,$sp,$S1
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S1, S1, in1_y);
- ld $bi,32($bp_real)
- ld $a0,$S2+0($sp)
- ld $a1,$S2+8($sp)
- ld $a2,$S2+16($sp)
- ld $a3,$S2+24($sp)
- addi $bp,$bp_real,32
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S2, in2_y);
- addi $bp,$sp,$S1
- ld $bi,$Z2sqr($sp) # forward load for p256_mul_mont
- ld $a0,0($ap_real)
- ld $a1,8($ap_real)
- ld $a2,16($ap_real)
- ld $a3,24($ap_real)
- addi $rp,$sp,$R
- bl __ecp_nistz256_sub_from # p256_sub(R, S2, S1);
- or $acc0,$acc0,$acc1 # see if result is zero
- or $acc2,$acc2,$acc3
- or $temp,$acc0,$acc2
- addi $bp,$sp,$Z2sqr
- addi $rp,$sp,$U1
- bl __ecp_nistz256_mul_mont # p256_mul_mont(U1, in1_x, Z2sqr);
- ld $bi,$Z1sqr($sp)
- ld $a0,0($bp_real)
- ld $a1,8($bp_real)
- ld $a2,16($bp_real)
- ld $a3,24($bp_real)
- addi $bp,$sp,$Z1sqr
- addi $rp,$sp,$U2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, in2_x, Z1sqr);
- addi $bp,$sp,$U1
- ld $a0,$R+0($sp) # forward load for p256_sqr_mont
- ld $a1,$R+8($sp)
- ld $a2,$R+16($sp)
- ld $a3,$R+24($sp)
- addi $rp,$sp,$H
- bl __ecp_nistz256_sub_from # p256_sub(H, U2, U1);
- or $acc0,$acc0,$acc1 # see if result is zero
- or $acc2,$acc2,$acc3
- or. $acc0,$acc0,$acc2
- bne .Ladd_proceed # is_equal(U1,U2)?
- and. $t0,$in1infty,$in2infty
- beq .Ladd_proceed # (in1infty || in2infty)?
- cmpldi $temp,0
- beq .Ladd_double # is_equal(S1,S2)?
- xor $a0,$a0,$a0
- std $a0,0($rp_real)
- std $a0,8($rp_real)
- std $a0,16($rp_real)
- std $a0,24($rp_real)
- std $a0,32($rp_real)
- std $a0,40($rp_real)
- std $a0,48($rp_real)
- std $a0,56($rp_real)
- std $a0,64($rp_real)
- std $a0,72($rp_real)
- std $a0,80($rp_real)
- std $a0,88($rp_real)
- b .Ladd_done
- .align 4
- .Ladd_double:
- ld $bp,0($sp) # back-link
- mr $ap,$ap_real
- mr $rp,$rp_real
- ld r16,$FRAME-8*16($sp)
- ld r17,$FRAME-8*15($sp)
- ld r18,$FRAME-8*14($sp)
- ld r19,$FRAME-8*13($sp)
- stdu $bp,$FRAME-288($sp) # difference in stack frame sizes
- b .Ldouble_shortcut
- .align 4
- .Ladd_proceed:
- addi $rp,$sp,$Rsqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Rsqr, R);
- ld $bi,64($ap_real)
- ld $a0,$H+0($sp)
- ld $a1,$H+8($sp)
- ld $a2,$H+16($sp)
- ld $a3,$H+24($sp)
- addi $bp,$ap_real,64
- addi $rp,$sp,$res_z
- bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, H, in1_z);
- ld $a0,$H+0($sp)
- ld $a1,$H+8($sp)
- ld $a2,$H+16($sp)
- ld $a3,$H+24($sp)
- addi $rp,$sp,$Hsqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Hsqr, H);
- ld $bi,64($bp_real)
- ld $a0,$res_z+0($sp)
- ld $a1,$res_z+8($sp)
- ld $a2,$res_z+16($sp)
- ld $a3,$res_z+24($sp)
- addi $bp,$bp_real,64
- addi $rp,$sp,$res_z
- bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, res_z, in2_z);
- ld $bi,$H($sp)
- ld $a0,$Hsqr+0($sp)
- ld $a1,$Hsqr+8($sp)
- ld $a2,$Hsqr+16($sp)
- ld $a3,$Hsqr+24($sp)
- addi $bp,$sp,$H
- addi $rp,$sp,$Hcub
- bl __ecp_nistz256_mul_mont # p256_mul_mont(Hcub, Hsqr, H);
- ld $bi,$Hsqr($sp)
- ld $a0,$U1+0($sp)
- ld $a1,$U1+8($sp)
- ld $a2,$U1+16($sp)
- ld $a3,$U1+24($sp)
- addi $bp,$sp,$Hsqr
- addi $rp,$sp,$U2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, U1, Hsqr);
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- addi $rp,$sp,$Hsqr
- bl __ecp_nistz256_add # p256_mul_by_2(Hsqr, U2);
- addi $bp,$sp,$Rsqr
- addi $rp,$sp,$res_x
- bl __ecp_nistz256_sub_morf # p256_sub(res_x, Rsqr, Hsqr);
- addi $bp,$sp,$Hcub
- bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, Hcub);
- addi $bp,$sp,$U2
- ld $bi,$Hcub($sp) # forward load for p256_mul_mont
- ld $a0,$S1+0($sp)
- ld $a1,$S1+8($sp)
- ld $a2,$S1+16($sp)
- ld $a3,$S1+24($sp)
- addi $rp,$sp,$res_y
- bl __ecp_nistz256_sub_morf # p256_sub(res_y, U2, res_x);
- addi $bp,$sp,$Hcub
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S1, Hcub);
- ld $bi,$R($sp)
- ld $a0,$res_y+0($sp)
- ld $a1,$res_y+8($sp)
- ld $a2,$res_y+16($sp)
- ld $a3,$res_y+24($sp)
- addi $bp,$sp,$R
- addi $rp,$sp,$res_y
- bl __ecp_nistz256_mul_mont # p256_mul_mont(res_y, res_y, R);
- addi $bp,$sp,$S2
- bl __ecp_nistz256_sub_from # p256_sub(res_y, res_y, S2);
- ld $t0,0($bp_real) # in2
- ld $t1,8($bp_real)
- ld $t2,16($bp_real)
- ld $t3,24($bp_real)
- ld $a0,$res_x+0($sp) # res
- ld $a1,$res_x+8($sp)
- ld $a2,$res_x+16($sp)
- ld $a3,$res_x+24($sp)
- ___
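- # $in1infty and $in2infty were computed above as all-ones masks when the
- # corresponding input is *not* the point at infinity (x|-x has its sign
- # bit set iff x != 0, and sradi by 63 smears that bit across the
- # register), and as zero when it is.  The loop below uses them to select
- # the final result without branches.  A commented-out sketch of the select
- # idiom (illustrative; the C reference code in ecp_nistz256.c does the
- # equivalent with its copy_conditional helper):
- #
- #	# $mask is either 0 or all ones
- #	sub demo_select64 {
- #	    my ($mask, $a, $b) = @_;
- #	    return ($b & ~$mask) | ($a & $mask);	# == $mask ? $a : $b
- #	}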
- for($i=0;$i<64;$i+=32) { # conditional moves
- $code.=<<___;
- ld $acc0,$i+0($ap_real) # in1
- ld $acc1,$i+8($ap_real)
- ld $acc2,$i+16($ap_real)
- ld $acc3,$i+24($ap_real)
- andc $t0,$t0,$in1infty
- andc $t1,$t1,$in1infty
- andc $t2,$t2,$in1infty
- andc $t3,$t3,$in1infty
- and $a0,$a0,$in1infty
- and $a1,$a1,$in1infty
- and $a2,$a2,$in1infty
- and $a3,$a3,$in1infty
- or $t0,$t0,$a0
- or $t1,$t1,$a1
- or $t2,$t2,$a2
- or $t3,$t3,$a3
- andc $acc0,$acc0,$in2infty
- andc $acc1,$acc1,$in2infty
- andc $acc2,$acc2,$in2infty
- andc $acc3,$acc3,$in2infty
- and $t0,$t0,$in2infty
- and $t1,$t1,$in2infty
- and $t2,$t2,$in2infty
- and $t3,$t3,$in2infty
- or $acc0,$acc0,$t0
- or $acc1,$acc1,$t1
- or $acc2,$acc2,$t2
- or $acc3,$acc3,$t3
- ld $t0,$i+32($bp_real) # in2
- ld $t1,$i+40($bp_real)
- ld $t2,$i+48($bp_real)
- ld $t3,$i+56($bp_real)
- ld $a0,$res_x+$i+32($sp)
- ld $a1,$res_x+$i+40($sp)
- ld $a2,$res_x+$i+48($sp)
- ld $a3,$res_x+$i+56($sp)
- std $acc0,$i+0($rp_real)
- std $acc1,$i+8($rp_real)
- std $acc2,$i+16($rp_real)
- std $acc3,$i+24($rp_real)
- ___
- }
- $code.=<<___;
- ld $acc0,$i+0($ap_real) # in1
- ld $acc1,$i+8($ap_real)
- ld $acc2,$i+16($ap_real)
- ld $acc3,$i+24($ap_real)
- andc $t0,$t0,$in1infty
- andc $t1,$t1,$in1infty
- andc $t2,$t2,$in1infty
- andc $t3,$t3,$in1infty
- and $a0,$a0,$in1infty
- and $a1,$a1,$in1infty
- and $a2,$a2,$in1infty
- and $a3,$a3,$in1infty
- or $t0,$t0,$a0
- or $t1,$t1,$a1
- or $t2,$t2,$a2
- or $t3,$t3,$a3
- andc $acc0,$acc0,$in2infty
- andc $acc1,$acc1,$in2infty
- andc $acc2,$acc2,$in2infty
- andc $acc3,$acc3,$in2infty
- and $t0,$t0,$in2infty
- and $t1,$t1,$in2infty
- and $t2,$t2,$in2infty
- and $t3,$t3,$in2infty
- or $acc0,$acc0,$t0
- or $acc1,$acc1,$t1
- or $acc2,$acc2,$t2
- or $acc3,$acc3,$t3
- std $acc0,$i+0($rp_real)
- std $acc1,$i+8($rp_real)
- std $acc2,$i+16($rp_real)
- std $acc3,$i+24($rp_real)
- .Ladd_done:
- mtlr r0
- ld r16,$FRAME-8*16($sp)
- ld r17,$FRAME-8*15($sp)
- ld r18,$FRAME-8*14($sp)
- ld r19,$FRAME-8*13($sp)
- ld r20,$FRAME-8*12($sp)
- ld r21,$FRAME-8*11($sp)
- ld r22,$FRAME-8*10($sp)
- ld r23,$FRAME-8*9($sp)
- ld r24,$FRAME-8*8($sp)
- ld r25,$FRAME-8*7($sp)
- ld r26,$FRAME-8*6($sp)
- ld r27,$FRAME-8*5($sp)
- ld r28,$FRAME-8*4($sp)
- ld r29,$FRAME-8*3($sp)
- ld r30,$FRAME-8*2($sp)
- ld r31,$FRAME-8*1($sp)
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,0,0x80,16,3,0
- .long 0
- .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
- ___
- }
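- # As with doubling, the sequence above follows the textbook Jacobian
- # addition.  Gathered into one commented-out Math::BigInt sketch
- # (illustrative only; the special cases handled above -- an input at
- # infinity, or equal inputs branching to .Ladd_double -- are omitted):
- #
- #	sub demo_point_add {
- #	    my ($x1,$y1,$z1, $x2,$y2,$z2, $p) = @_;
- #	    my $u1 = ($x1 * $z2**2) % $p;	my $s1 = ($y1 * $z2**3) % $p;
- #	    my $u2 = ($x2 * $z1**2) % $p;	my $s2 = ($y2 * $z1**3) % $p;
- #	    my $h  = ($u2 - $u1) % $p;		my $r  = ($s2 - $s1) % $p;
- #	    my $x3 = ($r**2 - $h**3 - 2*$u1*$h**2) % $p;
- #	    my $y3 = ($r*($u1*$h**2 - $x3) - $s1*$h**3) % $p;
- #	    my $z3 = ($h * $z1 * $z2) % $p;
- #	    return ($x3, $y3, $z3);
- #	}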
- ########################################################################
- # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
- # const P256_POINT_AFFINE *in2);
- if (1) {
- my $FRAME = 64 + 32*10 + 16*8;
- my ($res_x,$res_y,$res_z,
- $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(64+32*$_,(0..9));
- my $Z1sqr = $S2;
- # above map() describes stack layout with 10 temporary
- # 256-bit vectors on top.
- my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
- $code.=<<___;
- .globl ecp_nistz256_point_add_affine
- .align 5
- ecp_nistz256_point_add_affine:
- stdu $sp,-$FRAME($sp)
- mflr r0
- std r16,$FRAME-8*16($sp)
- std r17,$FRAME-8*15($sp)
- std r18,$FRAME-8*14($sp)
- std r19,$FRAME-8*13($sp)
- std r20,$FRAME-8*12($sp)
- std r21,$FRAME-8*11($sp)
- std r22,$FRAME-8*10($sp)
- std r23,$FRAME-8*9($sp)
- std r24,$FRAME-8*8($sp)
- std r25,$FRAME-8*7($sp)
- std r26,$FRAME-8*6($sp)
- std r27,$FRAME-8*5($sp)
- std r28,$FRAME-8*4($sp)
- std r29,$FRAME-8*3($sp)
- std r30,$FRAME-8*2($sp)
- std r31,$FRAME-8*1($sp)
- li $poly1,-1
- srdi $poly1,$poly1,32 # 0x00000000ffffffff
- li $poly3,1
- orc $poly3,$poly3,$poly1 # 0xffffffff00000001
- mr $rp_real,$rp
- mr $ap_real,$ap
- mr $bp_real,$bp
- ld $a0,64($ap) # in1_z
- ld $a1,72($ap)
- ld $a2,80($ap)
- ld $a3,88($ap)
- or $t0,$a0,$a1
- or $t2,$a2,$a3
- or $in1infty,$t0,$t2
- neg $t0,$in1infty
- or $in1infty,$in1infty,$t0
- sradi $in1infty,$in1infty,63 # !in1infty
- ld $acc0,0($bp) # in2_x
- ld $acc1,8($bp)
- ld $acc2,16($bp)
- ld $acc3,24($bp)
- ld $t0,32($bp) # in2_y
- ld $t1,40($bp)
- ld $t2,48($bp)
- ld $t3,56($bp)
- or $acc0,$acc0,$acc1
- or $acc2,$acc2,$acc3
- or $acc0,$acc0,$acc2
- or $t0,$t0,$t1
- or $t2,$t2,$t3
- or $t0,$t0,$t2
- or $in2infty,$acc0,$t0
- neg $t0,$in2infty
- or $in2infty,$in2infty,$t0
- sradi $in2infty,$in2infty,63 # !in2infty
- addi $rp,$sp,$Z1sqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
- mr $a0,$acc0
- mr $a1,$acc1
- mr $a2,$acc2
- mr $a3,$acc3
- ld $bi,0($bp_real)
- addi $bp,$bp_real,0
- addi $rp,$sp,$U2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, Z1sqr, in2_x);
- addi $bp,$ap_real,0
- ld $bi,64($ap_real) # forward load for p256_mul_mont
- ld $a0,$Z1sqr+0($sp)
- ld $a1,$Z1sqr+8($sp)
- ld $a2,$Z1sqr+16($sp)
- ld $a3,$Z1sqr+24($sp)
- addi $rp,$sp,$H
- bl __ecp_nistz256_sub_from # p256_sub(H, U2, in1_x);
- addi $bp,$ap_real,64
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, Z1sqr, in1_z);
- ld $bi,64($ap_real)
- ld $a0,$H+0($sp)
- ld $a1,$H+8($sp)
- ld $a2,$H+16($sp)
- ld $a3,$H+24($sp)
- addi $bp,$ap_real,64
- addi $rp,$sp,$res_z
- bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, H, in1_z);
- ld $bi,32($bp_real)
- ld $a0,$S2+0($sp)
- ld $a1,$S2+8($sp)
- ld $a2,$S2+16($sp)
- ld $a3,$S2+24($sp)
- addi $bp,$bp_real,32
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S2, in2_y);
- addi $bp,$ap_real,32
- ld $a0,$H+0($sp) # forward load for p256_sqr_mont
- ld $a1,$H+8($sp)
- ld $a2,$H+16($sp)
- ld $a3,$H+24($sp)
- addi $rp,$sp,$R
- bl __ecp_nistz256_sub_from # p256_sub(R, S2, in1_y);
- addi $rp,$sp,$Hsqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Hsqr, H);
- ld $a0,$R+0($sp)
- ld $a1,$R+8($sp)
- ld $a2,$R+16($sp)
- ld $a3,$R+24($sp)
- addi $rp,$sp,$Rsqr
- bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Rsqr, R);
- ld $bi,$H($sp)
- ld $a0,$Hsqr+0($sp)
- ld $a1,$Hsqr+8($sp)
- ld $a2,$Hsqr+16($sp)
- ld $a3,$Hsqr+24($sp)
- addi $bp,$sp,$H
- addi $rp,$sp,$Hcub
- bl __ecp_nistz256_mul_mont # p256_mul_mont(Hcub, Hsqr, H);
- ld $bi,0($ap_real)
- ld $a0,$Hsqr+0($sp)
- ld $a1,$Hsqr+8($sp)
- ld $a2,$Hsqr+16($sp)
- ld $a3,$Hsqr+24($sp)
- addi $bp,$ap_real,0
- addi $rp,$sp,$U2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, in1_x, Hsqr);
- mr $t0,$acc0
- mr $t1,$acc1
- mr $t2,$acc2
- mr $t3,$acc3
- addi $rp,$sp,$Hsqr
- bl __ecp_nistz256_add # p256_mul_by_2(Hsqr, U2);
- addi $bp,$sp,$Rsqr
- addi $rp,$sp,$res_x
- bl __ecp_nistz256_sub_morf # p256_sub(res_x, Rsqr, Hsqr);
- addi $bp,$sp,$Hcub
- bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, Hcub);
- addi $bp,$sp,$U2
- ld $bi,32($ap_real) # forward load for p256_mul_mont
- ld $a0,$Hcub+0($sp)
- ld $a1,$Hcub+8($sp)
- ld $a2,$Hcub+16($sp)
- ld $a3,$Hcub+24($sp)
- addi $rp,$sp,$res_y
- bl __ecp_nistz256_sub_morf # p256_sub(res_y, U2, res_x);
- addi $bp,$ap_real,32
- addi $rp,$sp,$S2
- bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, in1_y, Hcub);
- ld $bi,$R($sp)
- ld $a0,$res_y+0($sp)
- ld $a1,$res_y+8($sp)
- ld $a2,$res_y+16($sp)
- ld $a3,$res_y+24($sp)
- addi $bp,$sp,$R
- addi $rp,$sp,$res_y
- bl __ecp_nistz256_mul_mont # p256_mul_mont(res_y, res_y, R);
- addi $bp,$sp,$S2
- bl __ecp_nistz256_sub_from # p256_sub(res_y, res_y, S2);
- ld $t0,0($bp_real) # in2
- ld $t1,8($bp_real)
- ld $t2,16($bp_real)
- ld $t3,24($bp_real)
- ld $a0,$res_x+0($sp) # res
- ld $a1,$res_x+8($sp)
- ld $a2,$res_x+16($sp)
- ld $a3,$res_x+24($sp)
- ___
- for($i=0;$i<64;$i+=32) { # conditional moves
- $code.=<<___;
- ld $acc0,$i+0($ap_real) # in1
- ld $acc1,$i+8($ap_real)
- ld $acc2,$i+16($ap_real)
- ld $acc3,$i+24($ap_real)
- andc $t0,$t0,$in1infty
- andc $t1,$t1,$in1infty
- andc $t2,$t2,$in1infty
- andc $t3,$t3,$in1infty
- and $a0,$a0,$in1infty
- and $a1,$a1,$in1infty
- and $a2,$a2,$in1infty
- and $a3,$a3,$in1infty
- or $t0,$t0,$a0
- or $t1,$t1,$a1
- or $t2,$t2,$a2
- or $t3,$t3,$a3
- andc $acc0,$acc0,$in2infty
- andc $acc1,$acc1,$in2infty
- andc $acc2,$acc2,$in2infty
- andc $acc3,$acc3,$in2infty
- and $t0,$t0,$in2infty
- and $t1,$t1,$in2infty
- and $t2,$t2,$in2infty
- and $t3,$t3,$in2infty
- or $acc0,$acc0,$t0
- or $acc1,$acc1,$t1
- or $acc2,$acc2,$t2
- or $acc3,$acc3,$t3
- ___
- $code.=<<___ if ($i==0);
- ld $t0,32($bp_real) # in2
- ld $t1,40($bp_real)
- ld $t2,48($bp_real)
- ld $t3,56($bp_real)
- ___
- $code.=<<___ if ($i==32);
- li $t0,1 # Lone_mont
- not $t1,$poly1
- li $t2,-1
- not $t3,$poly3
- ___
- $code.=<<___;
- ld $a0,$res_x+$i+32($sp)
- ld $a1,$res_x+$i+40($sp)
- ld $a2,$res_x+$i+48($sp)
- ld $a3,$res_x+$i+56($sp)
- std $acc0,$i+0($rp_real)
- std $acc1,$i+8($rp_real)
- std $acc2,$i+16($rp_real)
- std $acc3,$i+24($rp_real)
- ___
- }
- $code.=<<___;
- ld $acc0,$i+0($ap_real) # in1
- ld $acc1,$i+8($ap_real)
- ld $acc2,$i+16($ap_real)
- ld $acc3,$i+24($ap_real)
- andc $t0,$t0,$in1infty
- andc $t1,$t1,$in1infty
- andc $t2,$t2,$in1infty
- andc $t3,$t3,$in1infty
- and $a0,$a0,$in1infty
- and $a1,$a1,$in1infty
- and $a2,$a2,$in1infty
- and $a3,$a3,$in1infty
- or $t0,$t0,$a0
- or $t1,$t1,$a1
- or $t2,$t2,$a2
- or $t3,$t3,$a3
- andc $acc0,$acc0,$in2infty
- andc $acc1,$acc1,$in2infty
- andc $acc2,$acc2,$in2infty
- andc $acc3,$acc3,$in2infty
- and $t0,$t0,$in2infty
- and $t1,$t1,$in2infty
- and $t2,$t2,$in2infty
- and $t3,$t3,$in2infty
- or $acc0,$acc0,$t0
- or $acc1,$acc1,$t1
- or $acc2,$acc2,$t2
- or $acc3,$acc3,$t3
- std $acc0,$i+0($rp_real)
- std $acc1,$i+8($rp_real)
- std $acc2,$i+16($rp_real)
- std $acc3,$i+24($rp_real)
- mtlr r0
- ld r16,$FRAME-8*16($sp)
- ld r17,$FRAME-8*15($sp)
- ld r18,$FRAME-8*14($sp)
- ld r19,$FRAME-8*13($sp)
- ld r20,$FRAME-8*12($sp)
- ld r21,$FRAME-8*11($sp)
- ld r22,$FRAME-8*10($sp)
- ld r23,$FRAME-8*9($sp)
- ld r24,$FRAME-8*8($sp)
- ld r25,$FRAME-8*7($sp)
- ld r26,$FRAME-8*6($sp)
- ld r27,$FRAME-8*5($sp)
- ld r28,$FRAME-8*4($sp)
- ld r29,$FRAME-8*3($sp)
- ld r30,$FRAME-8*2($sp)
- ld r31,$FRAME-8*1($sp)
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,0,0x80,16,3,0
- .long 0
- .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
- ___
- }
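- # ecp_nistz256_point_add_affine above is the same addition specialised for
- # an affine second input, i.e. an implicit Z2 = 1: U1 = X1 and S1 = Y1 need
- # no multiplication, H = X2*Z1^2 - X1, R = Y2*Z1^3 - Y1 and res_z = H*Z1.
- # When in1 is at infinity the result is taken from in2, whose missing Z
- # coordinate is supplied by the "Lone_mont" constants in the $i==32 block
- # above, i.e. 1 in Montgomery form:
- #
- #	ONE_mont = 2^256 mod p = 2^224 - 2^192 - 2^96 + 1
- #	         = { 1, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe }
- #	           (64-bit limbs, least significant first)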
- if (1) {
- my ($ordk,$ord0,$ord1,$t4) = map("r$_",(18..21));
- my ($ord2,$ord3,$zr) = ($poly1,$poly3,"r0");
- $code.=<<___;
- ########################################################################
- # void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
- # uint64_t b[4]);
- .globl ecp_nistz256_ord_mul_mont
- .align 5
- ecp_nistz256_ord_mul_mont:
- stdu $sp,-160($sp)
- std r18,48($sp)
- std r19,56($sp)
- std r20,64($sp)
- std r21,72($sp)
- std r22,80($sp)
- std r23,88($sp)
- std r24,96($sp)
- std r25,104($sp)
- std r26,112($sp)
- std r27,120($sp)
- std r28,128($sp)
- std r29,136($sp)
- std r30,144($sp)
- std r31,152($sp)
- ld $a0,0($ap)
- ld $bi,0($bp)
- ld $a1,8($ap)
- ld $a2,16($ap)
- ld $a3,24($ap)
- lis $ordk,0xccd1
- lis $ord0,0xf3b9
- lis $ord1,0xbce6
- ori $ordk,$ordk,0xc8aa
- ori $ord0,$ord0,0xcac2
- ori $ord1,$ord1,0xfaad
- sldi $ordk,$ordk,32
- sldi $ord0,$ord0,32
- sldi $ord1,$ord1,32
- oris $ordk,$ordk,0xee00
- oris $ord0,$ord0,0xfc63
- oris $ord1,$ord1,0xa717
- ori $ordk,$ordk,0xbc4f # 0xccd1c8aaee00bc4f
- ori $ord0,$ord0,0x2551 # 0xf3b9cac2fc632551
- ori $ord1,$ord1,0x9e84 # 0xbce6faada7179e84
- li $ord2,-1 # 0xffffffffffffffff
- sldi $ord3,$ord2,32 # 0xffffffff00000000
- li $zr,0
- mulld $acc0,$a0,$bi # a[0]*b[0]
- mulhdu $t0,$a0,$bi
- mulld $acc1,$a1,$bi # a[1]*b[0]
- mulhdu $t1,$a1,$bi
- mulld $acc2,$a2,$bi # a[2]*b[0]
- mulhdu $t2,$a2,$bi
- mulld $acc3,$a3,$bi # a[3]*b[0]
- mulhdu $acc4,$a3,$bi
- mulld $t4,$acc0,$ordk
- addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
- adde $acc2,$acc2,$t1
- adde $acc3,$acc3,$t2
- addze $acc4,$acc4
- li $acc5,0
- ___
- for ($i=1;$i<4;$i++) {
- ################################################################
- # ffff0000.ffffffff.yyyyyyyy.zzzzzzzz
- # * abcdefgh
- # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
- #
- # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
- # rewrite the above as:
- #
- # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
- # - 0000abcd.efgh0000.abcdefgh.00000000.00000000
- # + abcdefgh.abcdefgh.yzayzbyz.cyzdyzey.zfyzgyzh
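- #
- # Unlike the P-256 prime, the group order n has no special low limb, so a
- # genuine Montgomery constant is required: $ordk is chosen so that adding
- # (acc0*$ordk)*n cancels the low limb, i.e. $ordk == -n^-1 (mod 2^64).
- # A commented-out Math::BigInt sketch of one reduction step (illustrative
- # only; $acc is the running accumulator):
- #
- #	use Math::BigInt;
- #	my $n    = Math::BigInt->new("0xffffffff00000000ffffffffffffffff".
- #	                             "bce6faada7179e84f3b9cac2fc632551");
- #	my $ordk = Math::BigInt->new("0xccd1c8aaee00bc4f");
- #	my $R64  = Math::BigInt->new(2)->bpow(64);
- #	# my $m  = ($acc % $R64) * $ordk % $R64;	# "magic" digit t4
- #	# $acc   = ($acc + $m*$n) / $R64;		# low limb is now zero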
- $code.=<<___;
- ld $bi,8*$i($bp) # b[i]
- sldi $t0,$t4,32
- subfc $acc2,$t4,$acc2
- srdi $t1,$t4,32
- subfe $acc3,$t0,$acc3
- subfe $acc4,$t1,$acc4
- subfe $acc5,$zr,$acc5
- addic $t0,$acc0,-1 # discarded
- mulhdu $t1,$ord0,$t4
- mulld $t2,$ord1,$t4
- mulhdu $t3,$ord1,$t4
- adde $t2,$t2,$t1
- mulld $t0,$a0,$bi
- addze $t3,$t3
- mulld $t1,$a1,$bi
- addc $acc0,$acc1,$t2
- mulld $t2,$a2,$bi
- adde $acc1,$acc2,$t3
- mulld $t3,$a3,$bi
- adde $acc2,$acc3,$t4
- adde $acc3,$acc4,$t4
- addze $acc4,$acc5
- addc $acc0,$acc0,$t0 # accumulate low parts
- mulhdu $t0,$a0,$bi
- adde $acc1,$acc1,$t1
- mulhdu $t1,$a1,$bi
- adde $acc2,$acc2,$t2
- mulhdu $t2,$a2,$bi
- adde $acc3,$acc3,$t3
- mulhdu $t3,$a3,$bi
- addze $acc4,$acc4
- mulld $t4,$acc0,$ordk
- addc $acc1,$acc1,$t0 # accumulate high parts
- adde $acc2,$acc2,$t1
- adde $acc3,$acc3,$t2
- adde $acc4,$acc4,$t3
- addze $acc5,$zr
- ___
- }
- $code.=<<___;
- sldi $t0,$t4,32 # last reduction
- subfc $acc2,$t4,$acc2
- srdi $t1,$t4,32
- subfe $acc3,$t0,$acc3
- subfe $acc4,$t1,$acc4
- subfe $acc5,$zr,$acc5
- addic $t0,$acc0,-1 # discarded
- mulhdu $t1,$ord0,$t4
- mulld $t2,$ord1,$t4
- mulhdu $t3,$ord1,$t4
- adde $t2,$t2,$t1
- addze $t3,$t3
- addc $acc0,$acc1,$t2
- adde $acc1,$acc2,$t3
- adde $acc2,$acc3,$t4
- adde $acc3,$acc4,$t4
- addze $acc4,$acc5
- subfc $acc0,$ord0,$acc0 # ret -= modulus
- subfe $acc1,$ord1,$acc1
- subfe $acc2,$ord2,$acc2
- subfe $acc3,$ord3,$acc3
- subfe $acc4,$zr,$acc4
- and $t0,$ord0,$acc4
- and $t1,$ord1,$acc4
- addc $acc0,$acc0,$t0 # ret += modulus if borrow
- and $t3,$ord3,$acc4
- adde $acc1,$acc1,$t1
- adde $acc2,$acc2,$acc4
- adde $acc3,$acc3,$t3
- std $acc0,0($rp)
- std $acc1,8($rp)
- std $acc2,16($rp)
- std $acc3,24($rp)
- ld r18,48($sp)
- ld r19,56($sp)
- ld r20,64($sp)
- ld r21,72($sp)
- ld r22,80($sp)
- ld r23,88($sp)
- ld r24,96($sp)
- ld r25,104($sp)
- ld r26,112($sp)
- ld r27,120($sp)
- ld r28,128($sp)
- ld r29,136($sp)
- ld r30,144($sp)
- ld r31,152($sp)
- addi $sp,$sp,160
- blr
- .long 0
- .byte 0,12,4,0,0x80,14,3,0
- .long 0
- .size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
- ################################################################################
- # void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
- # uint64_t rep);
- .globl ecp_nistz256_ord_sqr_mont
- .align 5
- ecp_nistz256_ord_sqr_mont:
- stdu $sp,-160($sp)
- std r18,48($sp)
- std r19,56($sp)
- std r20,64($sp)
- std r21,72($sp)
- std r22,80($sp)
- std r23,88($sp)
- std r24,96($sp)
- std r25,104($sp)
- std r26,112($sp)
- std r27,120($sp)
- std r28,128($sp)
- std r29,136($sp)
- std r30,144($sp)
- std r31,152($sp)
- mtctr $bp
- ld $a0,0($ap)
- ld $a1,8($ap)
- ld $a2,16($ap)
- ld $a3,24($ap)
- lis $ordk,0xccd1
- lis $ord0,0xf3b9
- lis $ord1,0xbce6
- ori $ordk,$ordk,0xc8aa
- ori $ord0,$ord0,0xcac2
- ori $ord1,$ord1,0xfaad
- sldi $ordk,$ordk,32
- sldi $ord0,$ord0,32
- sldi $ord1,$ord1,32
- oris $ordk,$ordk,0xee00
- oris $ord0,$ord0,0xfc63
- oris $ord1,$ord1,0xa717
- ori $ordk,$ordk,0xbc4f # 0xccd1c8aaee00bc4f
- ori $ord0,$ord0,0x2551 # 0xf3b9cac2fc632551
- ori $ord1,$ord1,0x9e84 # 0xbce6faada7179e84
- li $ord2,-1 # 0xffffffffffffffff
- sldi $ord3,$ord2,32 # 0xffffffff00000000
- li $zr,0
- b .Loop_ord_sqr
- .align 5
- .Loop_ord_sqr:
- ################################################################
- # | | | | | |a1*a0| |
- # | | | | |a2*a0| | |
- # | |a3*a2|a3*a0| | | |
- # | | | |a2*a1| | | |
- # | | |a3*a1| | | | |
- # *| | | | | | | | 2|
- # +|a3*a3|a2*a2|a1*a1|a0*a0|
- # |--+--+--+--+--+--+--+--|
- # |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
- #
- # "can't overflow" below mark carrying into high part of
- # multiplication result, which can't overflow, because it
- # can never be all ones.
- mulld $acc1,$a1,$a0 # a[1]*a[0]
- mulhdu $t1,$a1,$a0
- mulld $acc2,$a2,$a0 # a[2]*a[0]
- mulhdu $t2,$a2,$a0
- mulld $acc3,$a3,$a0 # a[3]*a[0]
- mulhdu $acc4,$a3,$a0
- addc $acc2,$acc2,$t1 # accumulate high parts of multiplication
- mulld $t0,$a2,$a1 # a[2]*a[1]
- mulhdu $t1,$a2,$a1
- adde $acc3,$acc3,$t2
- mulld $t2,$a3,$a1 # a[3]*a[1]
- mulhdu $t3,$a3,$a1
- addze $acc4,$acc4 # can't overflow
- mulld $acc5,$a3,$a2 # a[3]*a[2]
- mulhdu $acc6,$a3,$a2
- addc $t1,$t1,$t2 # accumulate high parts of multiplication
- mulld $acc0,$a0,$a0 # a[0]*a[0]
- addze $t2,$t3 # can't overflow
- addc $acc3,$acc3,$t0 # accumulate low parts of multiplication
- mulhdu $a0,$a0,$a0
- adde $acc4,$acc4,$t1
- mulld $t1,$a1,$a1 # a[1]*a[1]
- adde $acc5,$acc5,$t2
- mulhdu $a1,$a1,$a1
- addze $acc6,$acc6 # can't overflow
- addc $acc1,$acc1,$acc1 # acc[1-6]*=2
- mulld $t2,$a2,$a2 # a[2]*a[2]
- adde $acc2,$acc2,$acc2
- mulhdu $a2,$a2,$a2
- adde $acc3,$acc3,$acc3
- mulld $t3,$a3,$a3 # a[3]*a[3]
- adde $acc4,$acc4,$acc4
- mulhdu $a3,$a3,$a3
- adde $acc5,$acc5,$acc5
- adde $acc6,$acc6,$acc6
- addze $acc7,$zr
- addc $acc1,$acc1,$a0 # +a[i]*a[i]
- mulld $t4,$acc0,$ordk
- adde $acc2,$acc2,$t1
- adde $acc3,$acc3,$a1
- adde $acc4,$acc4,$t2
- adde $acc5,$acc5,$a2
- adde $acc6,$acc6,$t3
- adde $acc7,$acc7,$a3
- ___
- for($i=0; $i<4; $i++) { # reductions
- $code.=<<___;
- addic $t0,$acc0,-1 # discarded
- mulhdu $t1,$ord0,$t4
- mulld $t2,$ord1,$t4
- mulhdu $t3,$ord1,$t4
- adde $t2,$t2,$t1
- addze $t3,$t3
- addc $acc0,$acc1,$t2
- adde $acc1,$acc2,$t3
- adde $acc2,$acc3,$t4
- adde $acc3,$zr,$t4 # can't overflow
- ___
- $code.=<<___ if ($i<3);
- mulld $t3,$acc0,$ordk
- ___
- $code.=<<___;
- sldi $t0,$t4,32
- subfc $acc1,$t4,$acc1
- srdi $t1,$t4,32
- subfe $acc2,$t0,$acc2
- subfe $acc3,$t1,$acc3 # can't borrow
- ___
- ($t3,$t4) = ($t4,$t3);
- }
- $code.=<<___;
- addc $acc0,$acc0,$acc4 # accumulate upper half
- adde $acc1,$acc1,$acc5
- adde $acc2,$acc2,$acc6
- adde $acc3,$acc3,$acc7
- addze $acc4,$zr
- subfc $acc0,$ord0,$acc0 # ret -= modulus
- subfe $acc1,$ord1,$acc1
- subfe $acc2,$ord2,$acc2
- subfe $acc3,$ord3,$acc3
- subfe $acc4,$zr,$acc4
- and $t0,$ord0,$acc4
- and $t1,$ord1,$acc4
- addc $a0,$acc0,$t0 # ret += modulus if borrow
- and $t3,$ord3,$acc4
- adde $a1,$acc1,$t1
- adde $a2,$acc2,$acc4
- adde $a3,$acc3,$t3
- bdnz .Loop_ord_sqr
- std $a0,0($rp)
- std $a1,8($rp)
- std $a2,16($rp)
- std $a3,24($rp)
- ld r18,48($sp)
- ld r19,56($sp)
- ld r20,64($sp)
- ld r21,72($sp)
- ld r22,80($sp)
- ld r23,88($sp)
- ld r24,96($sp)
- ld r25,104($sp)
- ld r26,112($sp)
- ld r27,120($sp)
- ld r28,128($sp)
- ld r29,136($sp)
- ld r30,144($sp)
- ld r31,152($sp)
- addi $sp,$sp,160
- blr
- .long 0
- .byte 0,12,4,0,0x80,14,3,0
- .long 0
- .size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
- ___
- } }
- ########################################################################
- # scatter-gather subroutines
- {
- my ($out,$inp,$index,$mask)=map("r$_",(3..7));
- $code.=<<___;
- ########################################################################
- # void ecp_nistz256_scatter_w5(void *out, const P256_POINT *inp,
- # int index);
- .globl ecp_nistz256_scatter_w5
- .align 4
- ecp_nistz256_scatter_w5:
- slwi $index,$index,2
- add $out,$out,$index
- ld r8, 0($inp) # X
- ld r9, 8($inp)
- ld r10,16($inp)
- ld r11,24($inp)
- stw r8, 64*0-4($out)
- srdi r8, r8, 32
- stw r9, 64*1-4($out)
- srdi r9, r9, 32
- stw r10,64*2-4($out)
- srdi r10,r10,32
- stw r11,64*3-4($out)
- srdi r11,r11,32
- stw r8, 64*4-4($out)
- stw r9, 64*5-4($out)
- stw r10,64*6-4($out)
- stw r11,64*7-4($out)
- addi $out,$out,64*8
- ld r8, 32($inp) # Y
- ld r9, 40($inp)
- ld r10,48($inp)
- ld r11,56($inp)
- stw r8, 64*0-4($out)
- srdi r8, r8, 32
- stw r9, 64*1-4($out)
- srdi r9, r9, 32
- stw r10,64*2-4($out)
- srdi r10,r10,32
- stw r11,64*3-4($out)
- srdi r11,r11,32
- stw r8, 64*4-4($out)
- stw r9, 64*5-4($out)
- stw r10,64*6-4($out)
- stw r11,64*7-4($out)
- addi $out,$out,64*8
- ld r8, 64($inp) # Z
- ld r9, 72($inp)
- ld r10,80($inp)
- ld r11,88($inp)
- stw r8, 64*0-4($out)
- srdi r8, r8, 32
- stw r9, 64*1-4($out)
- srdi r9, r9, 32
- stw r10,64*2-4($out)
- srdi r10,r10,32
- stw r11,64*3-4($out)
- srdi r11,r11,32
- stw r8, 64*4-4($out)
- stw r9, 64*5-4($out)
- stw r10,64*6-4($out)
- stw r11,64*7-4($out)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
- ########################################################################
- # void ecp_nistz256_gather_w5(P256_POINT *out, const void *inp,
- # int index);
- .globl ecp_nistz256_gather_w5
- .align 4
- ecp_nistz256_gather_w5:
- neg r0,$index
- sradi r0,r0,63
- add $index,$index,r0
- slwi $index,$index,2
- add $inp,$inp,$index
- lwz r5, 64*0($inp)
- lwz r6, 64*1($inp)
- lwz r7, 64*2($inp)
- lwz r8, 64*3($inp)
- lwz r9, 64*4($inp)
- lwz r10,64*5($inp)
- lwz r11,64*6($inp)
- lwz r12,64*7($inp)
- addi $inp,$inp,64*8
- sldi r9, r9, 32
- sldi r10,r10,32
- sldi r11,r11,32
- sldi r12,r12,32
- or r5,r5,r9
- or r6,r6,r10
- or r7,r7,r11
- or r8,r8,r12
- and r5,r5,r0
- and r6,r6,r0
- and r7,r7,r0
- and r8,r8,r0
- std r5,0($out) # X
- std r6,8($out)
- std r7,16($out)
- std r8,24($out)
- lwz r5, 64*0($inp)
- lwz r6, 64*1($inp)
- lwz r7, 64*2($inp)
- lwz r8, 64*3($inp)
- lwz r9, 64*4($inp)
- lwz r10,64*5($inp)
- lwz r11,64*6($inp)
- lwz r12,64*7($inp)
- addi $inp,$inp,64*8
- sldi r9, r9, 32
- sldi r10,r10,32
- sldi r11,r11,32
- sldi r12,r12,32
- or r5,r5,r9
- or r6,r6,r10
- or r7,r7,r11
- or r8,r8,r12
- and r5,r5,r0
- and r6,r6,r0
- and r7,r7,r0
- and r8,r8,r0
- std r5,32($out) # Y
- std r6,40($out)
- std r7,48($out)
- std r8,56($out)
- lwz r5, 64*0($inp)
- lwz r6, 64*1($inp)
- lwz r7, 64*2($inp)
- lwz r8, 64*3($inp)
- lwz r9, 64*4($inp)
- lwz r10,64*5($inp)
- lwz r11,64*6($inp)
- lwz r12,64*7($inp)
- sldi r9, r9, 32
- sldi r10,r10,32
- sldi r11,r11,32
- sldi r12,r12,32
- or r5,r5,r9
- or r6,r6,r10
- or r7,r7,r11
- or r8,r8,r12
- and r5,r5,r0
- and r6,r6,r0
- and r7,r7,r0
- and r8,r8,r0
- std r5,64($out) # Z
- std r6,72($out)
- std r7,80($out)
- std r8,88($out)
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
- ########################################################################
- # void ecp_nistz256_scatter_w7(void *out, const P256_POINT_AFFINE *inp,
- # int index);
- .globl ecp_nistz256_scatter_w7
- .align 4
- ecp_nistz256_scatter_w7:
- li r0,8
- mtctr r0
- add $out,$out,$index
- subi $inp,$inp,8
- .Loop_scatter_w7:
- ldu r0,8($inp)
- stb r0,64*0($out)
- srdi r0,r0,8
- stb r0,64*1($out)
- srdi r0,r0,8
- stb r0,64*2($out)
- srdi r0,r0,8
- stb r0,64*3($out)
- srdi r0,r0,8
- stb r0,64*4($out)
- srdi r0,r0,8
- stb r0,64*5($out)
- srdi r0,r0,8
- stb r0,64*6($out)
- srdi r0,r0,8
- stb r0,64*7($out)
- addi $out,$out,64*8
- bdnz .Loop_scatter_w7
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
- ########################################################################
- # void ecp_nistz256_gather_w7(P256_POINT_AFFINE *out, const void *inp,
- # int index);
- .globl ecp_nistz256_gather_w7
- .align 4
- ecp_nistz256_gather_w7:
- li r0,8
- mtctr r0
- neg r0,$index
- sradi r0,r0,63
- add $index,$index,r0
- add $inp,$inp,$index
- subi $out,$out,8
- .Loop_gather_w7:
- lbz r5, 64*0($inp)
- lbz r6, 64*1($inp)
- lbz r7, 64*2($inp)
- lbz r8, 64*3($inp)
- lbz r9, 64*4($inp)
- lbz r10,64*5($inp)
- lbz r11,64*6($inp)
- lbz r12,64*7($inp)
- addi $inp,$inp,64*8
- sldi r6, r6, 8
- sldi r7, r7, 16
- sldi r8, r8, 24
- sldi r9, r9, 32
- sldi r10,r10,40
- sldi r11,r11,48
- sldi r12,r12,56
- or r5,r5,r6
- or r7,r7,r8
- or r9,r9,r10
- or r11,r11,r12
- or r5,r5,r7
- or r9,r9,r11
- or r5,r5,r9
- and r5,r5,r0
- stdu r5,8($out)
- bdnz .Loop_gather_w7
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,3,0
- .long 0
- .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
- ___
- }
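- # Both gather routines above handle index 0 as "return the point at
- # infinity" in constant time: the mask r0 is all ones for a non-zero index
- # and zero otherwise, the index is biased down by one to address slot 0,
- # and every loaded word is ANDed with the mask.  A commented-out sketch of
- # that index handling (illustrative only; the real code builds the mask
- # branchlessly with neg + sradi 63):
- #
- #	my $mask = $index != 0 ? -1 : 0;	# all ones unless index == 0
- #	my $slot = $index + $mask;		# index-1, or 0 when index == 0
- #	# word := load($slot) & $mask		# index 0 thus yields all zeros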
- foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/ge;
- print $_,"\n";
- }
- close STDOUT or die "error closing STDOUT: $!"; # enforce flush