sp_int.c 611 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211
62221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211
66221662316624166251662616627166281662916630166311663216633166341663516636166371663816639166401664116642166431664416645166461664716648166491665016651166521665316654166551665616657166581665916660166611666216663166641666516666166671666816669166701667116672166731667416675166761667716678166791668016681166821668316684166851668616687166881668916690166911669216693166941669516696166971669816699167001670116702167031670416705167061670716708167091671016711167121671316714167151671616717167181671916720167211672216723167241672516726167271672816729167301673116732167331673416735167361673716738167391674016741167421674316744167451674616747167481674916750167511675216753167541675516756167571675816759167601676116762167631676416765167661676716768167691677016771167721677316774167751677616777167781677916780167811678216783167841678516786167871678816789167901679116792167931679416795167961679716798167991680016801168021680316804168051680616807168081680916810168111681216813168141681516816168171681816819168201682116822168231682416825168261682716828168291683016831168321683316834168351683616837168381683916840168411684216843168441684516846168471684816849168501685116852168531685416855168561685716858168591686016861168621686316864168651686616867168681686916870168711687216873168741687516876168771687816879168801688116882168831688416885168861688716888168891689016891168921689316894168951689616897168981689916900169011690216903169041690516906169071690816909169101691116912169131691416915169161691716918169191692016921169221692316924169251692616927169281692916930169311693216933169341693516936169371693816939169401694116942169431694416945169461694716948169491695016951169521695316954169551695616957169581695916960169611696216963169641696516966169671696816969169701697116972169731697416975169761697716978169791698016981169821698316984169851698616987169881698916990169911699216993169941699516996169971699816999170001700117002170031700417005170061700717008170091701017011170121701317014170151701617017170181701917020170211
70221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211
74221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211
78221782317824178251782617827178281782917830178311783217833178341783517836178371783817839178401784117842178431784417845178461784717848178491785017851178521785317854178551785617857178581785917860178611786217863178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211
822218223182241822518226182271822818229182301823118232182331823418235182361823718238182391824018241182421824318244182451824618247182481824918250182511825218253182541825518256182571825818259182601826118262182631826418265182661826718268182691827018271182721827318274182751827618277182781827918280182811828218283182841828518286182871828818289182901829118292182931829418295182961829718298182991830018301183021830318304183051830618307183081830918310183111831218313183141831518316183171831818319183201832118322183231832418325183261832718328183291833018331183321833318334183351833618337183381833918340183411834218343183441834518346183471834818349183501835118352183531835418355183561835718358
  1. /* sp_int.c
  2. *
  3. * Copyright (C) 2006-2022 wolfSSL Inc.
  4. *
  5. * This file is part of wolfSSL.
  6. *
  7. * wolfSSL is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * wolfSSL is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
  20. */
  21. /* Implementation by Sean Parkinson. */
  22. /*
  23. DESCRIPTION
  24. This library provides single precision (SP) integer math functions.
  25. */
  26. #ifdef HAVE_CONFIG_H
  27. #include <config.h>
  28. #endif
  29. #include <wolfssl/wolfcrypt/settings.h>
  30. #if defined(WOLFSSL_SP_MATH) || defined(WOLFSSL_SP_MATH_ALL)
  31. #include <wolfssl/wolfcrypt/error-crypt.h>
  32. #ifdef NO_INLINE
  33. #include <wolfssl/wolfcrypt/misc.h>
  34. #else
  35. #define WOLFSSL_MISC_INCLUDED
  36. #include <wolfcrypt/src/misc.c>
  37. #endif
  38. /* SP Build Options:
  39. * WOLFSSL_HAVE_SP_RSA: Enable SP RSA support
  40. * WOLFSSL_HAVE_SP_DH: Enable SP DH support
  41. * WOLFSSL_HAVE_SP_ECC: Enable SP ECC support
  42. * WOLFSSL_SP_MATH: Use only single precision math and algorithms
  43. * it supports (no fastmath tfm.c or normal integer.c)
  44. * WOLFSSL_SP_MATH_ALL Implementation of all MP functions
  45. * (replacement for tfm.c and integer.c)
  46. * WOLFSSL_SP_SMALL: Use smaller version of code and avoid large
  47. * stack variables
  48. * WOLFSSL_SP_NO_MALLOC: Always use stack, no heap XMALLOC/XFREE allowed
  49. * WOLFSSL_SP_NO_2048: Disable RSA/DH 2048-bit support
  50. * WOLFSSL_SP_NO_3072: Disable RSA/DH 3072-bit support
  51. * WOLFSSL_SP_4096 Enable RSA/DH 4096-bit support
  52. * WOLFSSL_SP_NO_256 Disable ECC 256-bit SECP256R1 support
  53. * WOLFSSL_SP_384 Enable ECC 384-bit SECP384R1 support
  54. * WOLFSSL_SP_521 Enable ECC 521-bit SECP521R1 support
  55. * WOLFSSL_SP_ASM Enable assembly speedups (detect platform)
  56. * WOLFSSL_SP_X86_64_ASM Enable Intel x64 assembly implementation
  57. * WOLFSSL_SP_ARM32_ASM Enable Aarch32 assembly implementation
  58. * WOLFSSL_SP_ARM64_ASM Enable Aarch64 assembly implementation
  59. * WOLFSSL_SP_ARM_CORTEX_M_ASM Enable Cortex-M assembly implementation
  60. * WOLFSSL_SP_ARM_THUMB_ASM Enable ARM Thumb assembly implementation
  61. * (used with -mthumb)
  62. * WOLFSSL_SP_X86_64 Enable Intel x86 64-bit assembly speedups
  63. * WOLFSSL_SP_X86 Enable Intel x86 assembly speedups
  64. * WOLFSSL_SP_ARM64 Enable Aarch64 assembly speedups
  65. * WOLFSSL_SP_ARM32 Enable ARM32 assembly speedups
  66. * WOLFSSL_SP_ARM32_UDIV Enable word divide asm that uses UDIV instr
  67. * WOLFSSL_SP_ARM_THUMB Enable ARM Thumb assembly speedups
  68. * (explicitly uses register 'r7')
  69. * WOLFSSL_SP_PPC64 Enable PPC64 assembly speedups
  70. * WOLFSSL_SP_PPC Enable PPC assembly speedups
  71. * WOLFSSL_SP_MIPS64 Enable MIPS64 assembly speedups
  72. * WOLFSSL_SP_MIPS Enable MIPS assembly speedups
  73. * WOLFSSL_SP_RISCV64 Enable RISCV64 assembly speedups
  74. * WOLFSSL_SP_RISCV32 Enable RISCV32 assembly speedups
  75. * WOLFSSL_SP_S390X Enable S390X assembly speedups
  76. * SP_WORD_SIZE Force 32 or 64 bit mode
  77. * WOLFSSL_SP_NONBLOCK Enables "non blocking" mode for SP math, which
  78. * will return FP_WOULDBLOCK for long operations and function must be
  79. * called again until complete.
  80. * WOLFSSL_SP_FAST_NCT_EXPTMOD Enables the faster non-constant time modular
  81. * exponentiation implementation.
  82. * WOLFSSL_SP_INT_NEGATIVE Enables negative values to be used.
  83. * WOLFSSL_SP_INT_DIGIT_ALIGN Enable when unaligned access of sp_int_digit
  84. * pointer is not allowed.
  85. * WOLFSSL_SP_NO_DYN_STACK Disable use of dynamic stack items.
  86. * Used with small code size and not small stack.
  87. * WOLFSSL_SP_FAST_MODEXP Allow fast mod_exp with small C code
  88. */
  89. /* TODO: WOLFSSL_SP_SMALL is incompatible with clang-12+ -Os. */
  90. #if defined(__clang__) && defined(__clang_major__) && \
  91. (__clang_major__ >= 12) && defined(WOLFSSL_SP_SMALL)
  92. #undef WOLFSSL_SP_SMALL
  93. #endif
  94. #include <wolfssl/wolfcrypt/sp_int.h>
/* DECL_SP_INT: Declare one variable of type 'sp_int'.
 *
 * Three configurations (must stay in sync with ALLOC_SP_INT/FREE_SP_INT):
 *  - small stack + malloc allowed: declare a pointer, filled by XMALLOC later.
 *  - C99 + small code + dynamic stack allowed: declare a byte buffer sized
 *    for 's' digits and alias it as an sp_int (avoids a full-size sp_int).
 *  - otherwise: a full-size sp_int on the stack ('n[1]' so 'n' is usable as
 *    a pointer, like in the other configurations).
 */
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
/* Declare a variable that will be assigned a value on XMALLOC. */
#define DECL_SP_INT(n, s) \
    sp_int* n = NULL
#else
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
/* Declare a variable on the stack with the required data size.
 * NOTE(review): MP_INT_SIZEOF(s) presumably yields the byte size of an
 * sp_int holding 's' digits -- defined elsewhere; confirm alignment is
 * acceptable for the cast (see WOLFSSL_SP_INT_DIGIT_ALIGN). */
#define DECL_SP_INT(n, s) \
    byte n##d[MP_INT_SIZEOF(s)]; \
    sp_int* n = (sp_int*)n##d
#else
/* Declare a variable on the stack. */
#define DECL_SP_INT(n, s) \
    sp_int n[1]
#endif
#endif
  114. /* ALLOC_SP_INT: Allocate an 'sp_int' of required size. */
  115. #if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
  116. !defined(WOLFSSL_SP_NO_MALLOC)
  117. /* Dynamically allocate just enough data to support size. */
  118. #define ALLOC_SP_INT(n, s, err, h) \
  119. do { \
  120. if (((err) == MP_OKAY) && (s > SP_INT_DIGITS)) { \
  121. (err) = MP_VAL; \
  122. } \
  123. if ((err) == MP_OKAY) { \
  124. (n) = (sp_int*)XMALLOC(MP_INT_SIZEOF(s), (h), \
  125. DYNAMIC_TYPE_BIGINT); \
  126. if ((n) == NULL) { \
  127. (err) = MP_MEM; \
  128. } \
  129. } \
  130. } \
  131. while (0)
  132. /* Dynamically allocate just enough data to support size - and set size. */
  133. #define ALLOC_SP_INT_SIZE(n, s, err, h) \
  134. do { \
  135. ALLOC_SP_INT(n, s, err, h); \
  136. if ((err) == MP_OKAY) { \
  137. (n)->size = (s); \
  138. } \
  139. } \
  140. while (0)
  141. #else
  142. /* Array declared on stack - check size is valid. */
  143. #define ALLOC_SP_INT(n, s, err, h) \
  144. do { \
  145. if (((err) == MP_OKAY) && (s > SP_INT_DIGITS)) { \
  146. (err) = MP_VAL; \
  147. } \
  148. } \
  149. while (0)
  150. /* Array declared on stack - set the size field. */
  151. #define ALLOC_SP_INT_SIZE(n, s, err, h) \
  152. do { \
  153. ALLOC_SP_INT(n, s, err, h); \
  154. if ((err) == MP_OKAY) { \
  155. (n)->size = (s); \
  156. } \
  157. } \
  158. while (0)
  159. #endif
/* FREE_SP_INT: Free an 'sp_int' variable.
 * Counterpart of DECL_SP_INT/ALLOC_SP_INT: only the heap configuration has
 * anything to release; 'h' is the heap hint passed through to XFREE.
 */
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
/* Free dynamically allocated data.
 * NULL check needed: ALLOC_SP_INT may have failed (or never run), leaving
 * the DECL_SP_INT-initialized pointer at NULL. */
#define FREE_SP_INT(n, h) \
do { \
    if ((n) != NULL) { \
        XFREE(n, h, DYNAMIC_TYPE_BIGINT); \
    } \
} \
while (0)
#else
/* Nothing to do as declared on stack. */
#define FREE_SP_INT(n, h)
#endif
/* DECL_SP_INT_ARRAY: Declare array of 'sp_int'.
 *
 * Declares 'n##d' as backing storage (heap pointer, sized byte buffer, or
 * sp_int array depending on configuration) plus 'n', an array of 'c'
 * sp_int pointers that ALLOC_SP_INT_ARRAY points into the storage.
 */
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
/* Declare a variable that will be assigned a value on XMALLOC. */
#define DECL_SP_INT_ARRAY(n, s, c) \
    sp_int* n##d = NULL; \
    sp_int* (n)[c] = { NULL, }
#else
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
/* Declare a variable on the stack with the required data size.
 * One contiguous byte buffer holds all 'c' entries of 's' digits each. */
#define DECL_SP_INT_ARRAY(n, s, c) \
    byte n##d[MP_INT_SIZEOF(s) * (c)]; \
    sp_int* (n)[c] = { NULL, }
#else
/* Declare a variable on the stack.
 * Pointer array left uninitialized here; ALLOC_SP_INT_ARRAY fills it. */
#define DECL_SP_INT_ARRAY(n, s, c) \
    sp_int n##d[c]; \
    sp_int* (n)[c]
#endif
#endif
  196. /* ALLOC_SP_INT_ARRAY: Allocate an array of 'sp_int's of required size. */
  197. #if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
  198. !defined(WOLFSSL_SP_NO_MALLOC)
  199. /* Dynamically allocate just enough data to support multiple sp_ints of the
  200. * required size. Use pointers into data to make up array and set sizes.
  201. */
  202. #define ALLOC_SP_INT_ARRAY(n, s, c, err, h) \
  203. do { \
  204. if (((err) == MP_OKAY) && (s > SP_INT_DIGITS)) { \
  205. (err) = MP_VAL; \
  206. } \
  207. if ((err) == MP_OKAY) { \
  208. n##d = (sp_int*)XMALLOC(MP_INT_SIZEOF(s) * (c), (h), \
  209. DYNAMIC_TYPE_BIGINT); \
  210. if (n##d == NULL) { \
  211. (err) = MP_MEM; \
  212. } \
  213. else { \
  214. int n##ii; \
  215. (n)[0] = n##d; \
  216. (n)[0]->size = (s); \
  217. for (n##ii = 1; n##ii < (c); n##ii++) { \
  218. (n)[n##ii] = MP_INT_NEXT((n)[n##ii-1], s); \
  219. (n)[n##ii]->size = (s); \
  220. } \
  221. } \
  222. } \
  223. } \
  224. while (0)
  225. #else
  226. #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
  227. defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
  228. /* Data declared on stack that supports multiple sp_ints of the
  229. * required size. Use pointers into data to make up array and set sizes.
  230. */
  231. #define ALLOC_SP_INT_ARRAY(n, s, c, err, h) \
  232. do { \
  233. if (((err) == MP_OKAY) && (s > SP_INT_DIGITS)) { \
  234. (err) = MP_VAL; \
  235. } \
  236. if ((err) == MP_OKAY) { \
  237. int n##ii; \
  238. (n)[0] = (sp_int*)n##d; \
  239. ((sp_int_minimal*)(n)[0])->size = (s); \
  240. for (n##ii = 1; n##ii < (c); n##ii++) { \
  241. (n)[n##ii] = MP_INT_NEXT((n)[n##ii-1], s); \
  242. ((sp_int_minimal*)(n)[n##ii])->size = (s); \
  243. } \
  244. } \
  245. } \
  246. while (0)
  247. #else
  248. /* Data declared on stack that supports multiple sp_ints of the
  249. * required size. Set into array and set sizes.
  250. */
  251. #define ALLOC_SP_INT_ARRAY(n, s, c, err, h) \
  252. do { \
  253. if (((err) == MP_OKAY) && (s > SP_INT_DIGITS)) { \
  254. (err) = MP_VAL; \
  255. } \
  256. if ((err) == MP_OKAY) { \
  257. int n##ii; \
  258. for (n##ii = 0; n##ii < (c); n##ii++) { \
  259. (n)[n##ii] = &n##d[n##ii]; \
  260. (n)[n##ii]->size = (s); \
  261. } \
  262. } \
  263. } \
  264. while (0)
  265. #endif
  266. #endif
/* FREE_SP_INT_ARRAY: Free an array of 'sp_int'.
 * Counterpart of DECL_SP_INT_ARRAY/ALLOC_SP_INT_ARRAY: only the single
 * backing allocation 'n##d' is freed, never the individual pointers in
 * 'n' (they alias into 'n##d').
 */
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
/* Free data variable that was dynamically allocated.
 * NULL check needed: allocation may have failed or never been attempted. */
#define FREE_SP_INT_ARRAY(n, h) \
do { \
    if (n##d != NULL) { \
        XFREE(n##d, h, DYNAMIC_TYPE_BIGINT); \
    } \
} \
while (0)
#else
/* Nothing to do as data declared on stack. */
#define FREE_SP_INT_ARRAY(n, h)
#endif
#ifndef WOLFSSL_NO_ASM
/* Map the GCC inline-assembly keywords used by the SP_ASM_* macros below
 * onto the spellings other toolchains accept, so the macro bodies can be
 * written once in GCC syntax.
 * NOTE(review): '__asm__'/'__volatile__' are implementation-reserved
 * identifiers; redefining them is deliberate here for IAR/Keil compat. */
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif
#if defined(WOLFSSL_SP_X86_64) && SP_WORD_SIZE == 64
/*
 * CPU: x86_64
 */
#ifndef _MSC_VER
/* Multiply va by vb and store double size result in: vh | vl
 * 'mulq' leaves the full 128-bit product in rdx:rax, which is copied out
 * to the h and l operands.  Inputs are memory operands ("m"); outputs must
 * be registers ("+r"). */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "movq %%rax, %[l] \n\t" \
        "movq %%rdx, %[h] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "m" (va), [b] "m" (vb) \
        : "memory", "%rax", "%rdx", "cc" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * The third (overflow) word vo is cleared to zero, hence its write-only
 * "=r" constraint rather than "+r". */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "movq $0 , %[o] \n\t" \
        "movq %%rax, %[l] \n\t" \
        "movq %%rdx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "%rax", "%rdx", "cc" \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 * The 128-bit product (rdx:rax) is added into the 3-word accumulator with
 * carry propagating low word to high: add -> adc -> adc. */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "adcq $0 , %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "%rax", "%rdx", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl
 * No third accumulator word: any carry out of vh is discarded, so callers
 * must know the 2-word accumulator cannot overflow here. */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va), [b] "m" (vb) \
        : "%rax", "%rdx", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Used for the off-diagonal terms of squaring (2*a[i]*a[j]); the product
 * in rdx:rax is accumulated twice, each pass carrying into vo. */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "adcq $0 , %[o] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "adcq $0 , %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "%rax", "%rdx", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl: the first add/adc pair has
 * no carry into vo (any such carry would be lost), only the second pass
 * propagates into the overflow word. */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movq %[b], %%rax \n\t" \
        "mulq %[a] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "adcq $0 , %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "%rax", "%rdx", "cc" \
    )
/* Square va and store double size result in: vh | vl
 * Same as SP_ASM_MUL with both multiplicands equal: rdx:rax = va*va. */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "movq %[a], %%rax \n\t" \
        "mulq %%rax \n\t" \
        "movq %%rax, %[l] \n\t" \
        "movq %%rdx, %[h] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "m" (va) \
        : "memory", "%rax", "%rdx", "cc" \
    )
/* Square va and add double size result into: vo | vh | vl
 * 128-bit square added into the 3-word accumulator with carry propagation
 * (add -> adc -> adc), as in SP_ASM_MUL_ADD. */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "movq %[a], %%rax \n\t" \
        "mulq %%rax \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        "adcq $0 , %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "m" (va) \
        : "%rax", "%rdx", "cc" \
    )
/* Square va and add double size result into: vh | vl
 * No overflow word: carry out of vh is discarded, so the caller must know
 * the 2-word accumulator cannot overflow here. */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "movq %[a], %%rax \n\t" \
        "mulq %%rax \n\t" \
        "addq %%rax, %[l] \n\t" \
        "adcq %%rdx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va) \
        : "%rax", "%rdx", "cc" \
    )
  407. /* Add va into: vh | vl */
  408. #define SP_ASM_ADDC(vl, vh, va) \
  409. __asm__ __volatile__ ( \
  410. "addq %[a], %[l] \n\t" \
  411. "adcq $0 , %[h] \n\t" \
  412. : [l] "+r" (vl), [h] "+r" (vh) \
  413. : [a] "m" (va) \
  414. : "cc" \
  415. )
  416. /* Add va, variable in a register, into: vh | vl */
  417. #define SP_ASM_ADDC_REG(vl, vh, va) \
  418. __asm__ __volatile__ ( \
  419. "addq %[a], %[l] \n\t" \
  420. "adcq $0 , %[h] \n\t" \
  421. : [l] "+r" (vl), [h] "+r" (vh) \
  422. : [a] "r" (va) \
  423. : "cc" \
  424. )
  425. /* Sub va from: vh | vl */
  426. #define SP_ASM_SUBB(vl, vh, va) \
  427. __asm__ __volatile__ ( \
  428. "subq %[a], %[l] \n\t" \
  429. "sbbq $0 , %[h] \n\t" \
  430. : [l] "+r" (vl), [h] "+r" (vh) \
  431. : [a] "m" (va) \
  432. : "cc" \
  433. )
  434. /* Sub va from: vh | vl */
  435. #define SP_ASM_SUBB_REG(vl, vh, va) \
  436. __asm__ __volatile__ ( \
  437. "subq %[a], %[l] \n\t" \
  438. "sbbq $0 , %[h] \n\t" \
  439. : [l] "+r" (vl), [h] "+r" (vh) \
  440. : [a] "r" (va) \
  441. : "cc" \
  442. )
  443. /* Add two times vc | vb | va into vo | vh | vl */
  444. #define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
  445. __asm__ __volatile__ ( \
  446. "addq %[a], %[l] \n\t" \
  447. "adcq %[b], %[h] \n\t" \
  448. "adcq %[c], %[o] \n\t" \
  449. "addq %[a], %[l] \n\t" \
  450. "adcq %[b], %[h] \n\t" \
  451. "adcq %[c], %[o] \n\t" \
  452. : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
  453. : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
  454. : "%rax", "%rdx", "cc" \
  455. )
#else
/* MSVC x64: GCC-style inline assembly is unavailable, so the same
 * primitives are built from compiler intrinsics: _umul128 produces the
 * full 128-bit product and _addcarry_u64/_subborrow_u64 chain the
 * carry/borrow flag between 64-bit operations. */
#include <intrin.h>
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb) \
    vl = _umul128(va, vb, &vh)
/* Multiply va by vb and store double size result in: vo | vh | vl */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    do { \
        vl = _umul128(va, vb, &vh); \
        vo = 0; \
    } \
    while (0)
/* Multiply va by vb and add double size result into: vo | vh | vl */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, vb, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        c = _addcarry_u64(c, vh, vth, &vh); \
        _addcarry_u64(c, vo, 0, &vo); \
    } \
    while (0)
/* Multiply va by vb and add double size result into: vh | vl
 * (final carry intentionally discarded - caller guarantees no overflow). */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, vb, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        _addcarry_u64(c, vh, vth, &vh); \
    } \
    while (0)
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, vb, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        c = _addcarry_u64(c, vh, vth, &vh); \
        _addcarry_u64(c, vo, 0, &vo); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        c = _addcarry_u64(c, vh, vth, &vh); \
        _addcarry_u64(c, vo, 0, &vo); \
    } \
    while (0)
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the first carry into vo
 * is skipped.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, vb, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        _addcarry_u64(c, vh, vth, &vh); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        c = _addcarry_u64(c, vh, vth, &vh); \
        _addcarry_u64(c, vo, 0, &vo); \
    } \
    while (0)
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va) \
    vl = _umul128(va, va, &vh)
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, va, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        c = _addcarry_u64(c, vh, vth, &vh); \
        _addcarry_u64(c, vo, 0, &vo); \
    } \
    while (0)
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    do { \
        unsigned __int64 vtl, vth; \
        unsigned char c; \
        vtl = _umul128(va, va, &vth); \
        c = _addcarry_u64(0, vl, vtl, &vl); \
        _addcarry_u64(c, vh, vth, &vh); \
    } \
    while (0)
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va) \
    do { \
        unsigned char c; \
        c = _addcarry_u64(0, vl, va, &vl); \
        _addcarry_u64(c, vh, 0, &vh); \
    } \
    while (0)
/* Add va, variable in a register, into: vh | vl
 * (identical to SP_ASM_ADDC - intrinsics carry no register/memory
 * constraint distinction; kept so callers match the GCC branch). */
#define SP_ASM_ADDC_REG(vl, vh, va) \
    do { \
        unsigned char c; \
        c = _addcarry_u64(0, vl, va, &vl); \
        _addcarry_u64(c, vh, 0, &vh); \
    } \
    while (0)
/* Sub va from: vh | vl
 * NOTE(review): unlike the GCC branch there is no SP_ASM_SUBB_REG here -
 * presumably unused in MSVC builds; confirm before relying on it. */
#define SP_ASM_SUBB(vl, vh, va) \
    do { \
        unsigned char c; \
        c = _subborrow_u64(0, vl, va, &vl); \
        _subborrow_u64(c, vh, 0, &vh); \
    } \
    while (0)
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    do { \
        unsigned char c; \
        c = _addcarry_u64(0, vl, va, &vl); \
        c = _addcarry_u64(c, vh, vb, &vh); \
        _addcarry_u64(c, vo, vc, &vo); \
        c = _addcarry_u64(0, vl, va, &vl); \
        c = _addcarry_u64(c, vh, vb, &vh); \
        _addcarry_u64(c, vo, vc, &vo); \
    } \
    while (0)
#endif
  579. #if !defined(WOLFSSL_SP_DIV_WORD_HALF) && (!defined(_MSC_VER) || \
  580. _MSC_VER >= 1920)
  581. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  582. *
  583. * Using divq instruction on Intel x64.
  584. *
  585. * @param [in] hi SP integer digit. High digit of the dividend.
  586. * @param [in] lo SP integer digit. Lower digit of the dividend.
  587. * @param [in] d SP integer digit. Number to divide by.
  588. * @return The division result.
  589. */
  590. static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
  591. sp_int_digit d)
  592. {
  593. #ifndef _MSC_VER
  594. __asm__ __volatile__ (
  595. "divq %2"
  596. : "+a" (lo)
  597. : "d" (hi), "r" (d)
  598. : "cc"
  599. );
  600. return lo;
  601. #elif defined(_MSC_VER) && _MSC_VER >= 1920
  602. return _udiv128(hi, lo, d, NULL);
  603. #endif
  604. }
  605. #define SP_ASM_DIV_WORD
  606. #endif
  607. #define SP_INT_ASM_AVAILABLE
  608. #endif /* WOLFSSL_SP_X86_64 && SP_WORD_SIZE == 64 */
  609. #if defined(WOLFSSL_SP_X86) && SP_WORD_SIZE == 32
  610. /*
  611. * CPU: x86
  612. */
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        "movl %%eax, %[l] \n\t" \
        "movl %%edx, %[h] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "m" (va), [b] "m" (vb) \
        : "memory", "eax", "edx", "cc" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * (vo is zeroed - it is a pure output, hence "=r"). */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        "movl $0 , %[o] \n\t" \
        "movl %%eax, %[l] \n\t" \
        "movl %%edx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "eax", "edx", "cc" \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 * ("+rm" outputs ease register pressure - x86 has few GP registers and
 * eax/edx are consumed by mull). */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "adcl $0 , %[o] \n\t" \
        : [l] "+rm" (vl), [h] "+rm" (vh), [o] "+rm" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "eax", "edx", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va), [b] "m" (vb) \
        : "eax", "edx", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        /* First accumulation of the product edx:eax. */ \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "adcl $0 , %[o] \n\t" \
        /* Second accumulation of the same product. */ \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "adcl $0 , %[o] \n\t" \
        : [l] "+rm" (vl), [h] "+rm" (vh), [o] "+rm" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "eax", "edx", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movl %[b], %%eax \n\t" \
        "mull %[a] \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "adcl $0 , %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "m" (va), [b] "m" (vb) \
        : "eax", "edx", "cc" \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "movl %[a], %%eax \n\t" \
        "mull %%eax \n\t" \
        "movl %%eax, %[l] \n\t" \
        "movl %%edx, %[h] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "m" (va) \
        : "memory", "eax", "edx", "cc" \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "movl %[a], %%eax \n\t" \
        "mull %%eax \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        "adcl $0 , %[o] \n\t" \
        : [l] "+rm" (vl), [h] "+rm" (vh), [o] "+rm" (vo) \
        : [a] "m" (va) \
        : "eax", "edx", "cc" \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "movl %[a], %%eax \n\t" \
        "mull %%eax \n\t" \
        "addl %%eax, %[l] \n\t" \
        "adcl %%edx, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va) \
        : "eax", "edx", "cc" \
    )
/* Add va (memory operand) into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "addl %[a], %[l] \n\t" \
        "adcl $0 , %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va) \
        : "cc" \
    )
/* Add va, variable in a register, into: vh | vl */
#define SP_ASM_ADDC_REG(vl, vh, va) \
    __asm__ __volatile__ ( \
        "addl %[a], %[l] \n\t" \
        "adcl $0 , %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Sub va (memory operand) from: vh | vl, with borrow into vh */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subl %[a], %[l] \n\t" \
        "sbbl $0 , %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "m" (va) \
        : "cc" \
    )
/* Sub va, variable in a register, from: vh | vl */
#define SP_ASM_SUBB_REG(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subl %[a], %[l] \n\t" \
        "sbbl $0 , %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "addl %[a], %[l] \n\t" \
        "adcl %[b], %[h] \n\t" \
        "adcl %[c], %[o] \n\t" \
        "addl %[a], %[l] \n\t" \
        "adcl %[b], %[h] \n\t" \
        "adcl %[c], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
        : "cc" \
    )
  773. #ifndef WOLFSSL_SP_DIV_WORD_HALF
  774. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  775. *
 * Using divl instruction on Intel x86.
  777. *
  778. * @param [in] hi SP integer digit. High digit of the dividend.
  779. * @param [in] lo SP integer digit. Lower digit of the dividend.
  780. * @param [in] d SP integer digit. Number to divide by.
  781. * @return The division result.
  782. */
static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
    sp_int_digit d)
{
    /* divl divides edx:eax by the operand; quotient is left in eax.
     * Callers must guarantee hi < d or the quotient overflows 32 bits and
     * the instruction faults (#DE). */
    __asm__ __volatile__ (
        "divl %2"
        : "+a" (lo)
        : "d" (hi), "r" (d)
        : "cc"
    );
    return lo;
}
  794. #define SP_ASM_DIV_WORD
  795. #endif
  796. #define SP_INT_ASM_AVAILABLE
  797. #endif /* WOLFSSL_SP_X86 && SP_WORD_SIZE == 32 */
  798. #if defined(WOLFSSL_SP_ARM64) && SP_WORD_SIZE == 64
  799. /*
  800. * CPU: Aarch64
  801. */
/* Multiply va by vb and store double size result in: vh | vl
 * (mul gives the low 64 bits, umulh the high 64 bits). */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mul %[l], %[a], %[b] \n\t" \
        "umulh %[h], %[a], %[b] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va), [b] "r" (vb) \
        : "memory", "cc" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * (x8 is used as scratch in case vl aliases va/vb; vo is zeroed). */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[b] \n\t" \
        "umulh %[h], %[a], %[b] \n\t" \
        "mov %[l], x8 \n\t" \
        "mov %[o], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "x8" \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[b] \n\t" \
        "umulh x9, %[a], %[b] \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adcs %[h], %[h], x9 \n\t" \
        "adc %[o], %[o], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "x8", "x9", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[b] \n\t" \
        "umulh x9, %[a], %[b] \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adc %[h], %[h], x9 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va), [b] "r" (vb) \
        : "x8", "x9", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[b] \n\t" \
        "umulh x9, %[a], %[b] \n\t" \
        /* First accumulation of x9:x8. */ \
        "adds %[l], %[l], x8 \n\t" \
        "adcs %[h], %[h], x9 \n\t" \
        "adc %[o], %[o], xzr \n\t" \
        /* Second accumulation of the same product. */ \
        "adds %[l], %[l], x8 \n\t" \
        "adcs %[h], %[h], x9 \n\t" \
        "adc %[o], %[o], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "x8", "x9", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[b] \n\t" \
        "umulh x9, %[a], %[b] \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adc %[h], %[h], x9 \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adcs %[h], %[h], x9 \n\t" \
        "adc %[o], %[o], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "x8", "x9", "cc" \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "mul %[l], %[a], %[a] \n\t" \
        "umulh %[h], %[a], %[a] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va) \
        : "memory" \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[a] \n\t" \
        "umulh x9, %[a], %[a] \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adcs %[h], %[h], x9 \n\t" \
        "adc %[o], %[o], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va) \
        : "x8", "x9", "cc" \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "mul x8, %[a], %[a] \n\t" \
        "umulh x9, %[a], %[a] \n\t" \
        "adds %[l], %[l], x8 \n\t" \
        "adc %[h], %[h], x9 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "x8", "x9", "cc" \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adc %[h], %[h], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Sub va from: vh | vl, with borrow into vh */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subs %[l], %[l], %[a] \n\t" \
        "sbc %[h], %[h], xzr \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adc %[o], %[o], %[c] \n\t" \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adc %[o], %[o], %[c] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
        : "cc" \
    )
  939. #ifndef WOLFSSL_SP_DIV_WORD_HALF
  940. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  941. *
  942. * Using udiv instruction on Aarch64.
 * Constant time only if udiv has data-independent latency on the target
 * core (NOTE(review): udiv latency varies on some implementations).
  944. *
  945. * @param [in] hi SP integer digit. High digit of the dividend.
  946. * @param [in] lo SP integer digit. Lower digit of the dividend.
  947. * @param [in] d SP integer digit. Number to divide by.
  948. * @return The division result.
  949. */
static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
    sp_int_digit d)
{
    /* Computes (hi|lo) / d without a 128/64 divide instruction:
     * 1. Normalize so the top 16 bits of d are non-zero (shift left by 16
     *    when they are zero); shift the dividend to match.
     * 2. Estimate the quotient 32 bits at a time by dividing the current
     *    high bits by (d >> 32) + 1 (an under-estimate that can't
     *    overshoot), subtracting q*d from the remainder each round.
     * 3. A final udiv by d cleans up the residual quotient bits. */
    __asm__ __volatile__ (
        /* x3 = 16 if the top 16 bits of d are clear, else 0;
         * x4 = 63 - x3 (used for the cross-word shift below). */
        "lsr x3, %[d], 48\n\t"
        "mov x5, 16\n\t"
        "cmp x3, 0\n\t"
        "mov x4, 63\n\t"
        "csel x3, x5, xzr, eq\n\t"
        "sub x4, x4, x3\n\t"
        /* Normalize d and the 128-bit dividend by x3 bits.
         * (lsr by x4 then lsr #1 totals 64-x3, avoiding a shift of 64.) */
        "lsl %[d], %[d], x3\n\t"
        "lsl %[hi], %[hi], x3\n\t"
        "lsr x5, %[lo], x4\n\t"
        "lsl %[lo], %[lo], x3\n\t"
        "orr %[hi], %[hi], x5, lsr 1\n\t"
        /* x5 = (d >> 32) + 1: divisor approximation for estimates. */
        "lsr x5, %[d], 32\n\t"
        "add x5, x5, 1\n\t"
        /* Round 1: top 32 bits of the quotient (x6 accumulates). */
        "udiv x3, %[hi], x5\n\t"
        "lsl x6, x3, 32\n\t"
        "mul x4, %[d], x6\n\t"
        "umulh x3, %[d], x6\n\t"
        "subs %[lo], %[lo], x4\n\t"
        "sbc %[hi], %[hi], x3\n\t"
        /* Round 2: refine the high half of the quotient. */
        "udiv x3, %[hi], x5\n\t"
        "lsl x3, x3, 32\n\t"
        "add x6, x6, x3\n\t"
        "mul x4, %[d], x3\n\t"
        "umulh x3, %[d], x3\n\t"
        "subs %[lo], %[lo], x4\n\t"
        "sbc %[hi], %[hi], x3\n\t"
        /* Round 3: next 32 bits from the middle of the remainder. */
        "lsr x3, %[lo], 32\n\t"
        "orr x3, x3, %[hi], lsl 32\n\t"
        "udiv x3, x3, x5\n\t"
        "add x6, x6, x3\n\t"
        "mul x4, %[d], x3\n\t"
        "umulh x3, %[d], x3\n\t"
        "subs %[lo], %[lo], x4\n\t"
        "sbc %[hi], %[hi], x3\n\t"
        /* Round 4: one more refinement of the low half. */
        "lsr x3, %[lo], 32\n\t"
        "orr x3, x3, %[hi], lsl 32\n\t"
        "udiv x3, x3, x5\n\t"
        "add x6, x6, x3\n\t"
        "mul x4, %[d], x3\n\t"
        "sub %[lo], %[lo], x4\n\t"
        /* Final exact division of the small remainder. */
        "udiv x3, %[lo], %[d]\n\t"
        "add %[hi], x6, x3\n\t"
        : [hi] "+r" (hi), [lo] "+r" (lo), [d] "+r" (d)
        :
        : "x3", "x4", "x5", "x6"
    );
    return hi;
}
  1002. #define SP_ASM_DIV_WORD
  1003. #endif
  1004. #define SP_INT_ASM_AVAILABLE
  1005. #endif /* WOLFSSL_SP_ARM64 && SP_WORD_SIZE == 64 */
  1006. #if (defined(WOLFSSL_SP_ARM32) || defined(WOLFSSL_SP_ARM_CORTEX_M)) && \
  1007. SP_WORD_SIZE == 32
  1008. /*
  1009. * CPU: ARM32 or Cortex-M4 and similar
  1010. */
/* Multiply va by vb and store double size result in: vh | vl
 * (umull produces the full 64-bit product in one instruction). */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "umull %[l], %[h], %[a], %[b] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va), [b] "r" (vb) \
        : "memory" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "umull %[l], %[h], %[a], %[b] \n\t" \
        "mov %[o], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 * (r8/r9 scratch: r7 is avoided as it may be the frame pointer). */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "umull r8, r9, %[a], %[b] \n\t" \
        "adds %[l], %[l], r8 \n\t" \
        "adcs %[h], %[h], r9 \n\t" \
        "adc %[o], %[o], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "r8", "r9", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl
 * (umlal is multiply-accumulate: vh|vl += va * vb; no flags touched). */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "umlal %[l], %[h], %[a], %[b] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va), [b] "r" (vb) \
        : \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "umull r8, r9, %[a], %[b] \n\t" \
        /* First accumulation of r9:r8. */ \
        "adds %[l], %[l], r8 \n\t" \
        "adcs %[h], %[h], r9 \n\t" \
        "adc %[o], %[o], #0 \n\t" \
        /* Second accumulation of the same product. */ \
        "adds %[l], %[l], r8 \n\t" \
        "adcs %[h], %[h], r9 \n\t" \
        "adc %[o], %[o], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "r8", "r9", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "umull r8, r9, %[a], %[b] \n\t" \
        "adds %[l], %[l], r8 \n\t" \
        "adc %[h], %[h], r9 \n\t" \
        "adds %[l], %[l], r8 \n\t" \
        "adcs %[h], %[h], r9 \n\t" \
        "adc %[o], %[o], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "r8", "r9", "cc" \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "umull %[l], %[h], %[a], %[a] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va) \
        : "memory" \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "umull r8, r9, %[a], %[a] \n\t" \
        "adds %[l], %[l], r8 \n\t" \
        "adcs %[h], %[h], r9 \n\t" \
        "adc %[o], %[o], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va) \
        : "r8", "r9", "cc" \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "umlal %[l], %[h], %[a], %[a] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adc %[h], %[h], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Sub va from: vh | vl, with borrow into vh */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subs %[l], %[l], %[a] \n\t" \
        "sbc %[h], %[h], #0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adc %[o], %[o], %[c] \n\t" \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adc %[o], %[o], %[c] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
        : "cc" \
    )
  1134. #ifndef WOLFSSL_SP_DIV_WORD_HALF
  1135. #ifndef WOLFSSL_SP_ARM32_UDIV
  1136. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  1137. *
  1138. * No division instruction used - does operation bit by bit.
  1139. * Constant time.
  1140. *
  1141. * @param [in] hi SP integer digit. High digit of the dividend.
  1142. * @param [in] lo SP integer digit. Lower digit of the dividend.
  1143. * @param [in] d SP integer digit. Number to divide by.
  1144. * @return The division result.
  1145. */
static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
    sp_int_digit d)
{
    /* Quotient accumulator, built one bit per iteration. */
    sp_int_digit r = 0;
#if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
    /* Pre-ARMv7 has no clz: count leading zeros with the classic
     * de Bruijn multiply-and-lookup trick instead. */
    static const char debruijn32[32] = {
        0, 31, 9, 30, 3, 8, 13, 29, 2, 5, 7, 21, 12, 24, 28, 19,
        1, 10, 4, 14, 6, 22, 25, 20, 11, 15, 23, 26, 16, 27, 17, 18
    };
    static const sp_uint32 debruijn32_mul = 0x076be629;
#endif
    __asm__ __volatile__ (
        /* Shift d so that top bit is set. */
#if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
        /* r5 = clz(d) via de Bruijn table (smear bits down, +1 to isolate
         * the highest power of two, multiply and index the table). */
        "ldr r4, %[m]\n\t"
        "mov r5, %[d]\n\t"
        "orr r5, r5, r5, lsr #1\n\t"
        "orr r5, r5, r5, lsr #2\n\t"
        "orr r5, r5, r5, lsr #4\n\t"
        "orr r5, r5, r5, lsr #8\n\t"
        "orr r5, r5, r5, lsr #16\n\t"
        "add r5, r5, #1\n\t"
        "mul r5, r5, r4\n\t"
        "lsr r5, r5, #27\n\t"
        "ldrb r5, [%[t], r5]\n\t"
#else
        "clz r5, %[d]\n\t"
#endif
        /* Normalize d and the 64-bit dividend by r5 bits.
         * (lsr by r6=31-r5 then lsr #1 totals 32-r5, avoiding a shift
         * of 32.) */
        "rsb r6, r5, #31\n\t"
        "lsl %[d], %[d], r5\n\t"
        "lsl %[hi], %[hi], r5\n\t"
        "lsr r9, %[lo], r6\n\t"
        "lsl %[lo], %[lo], r5\n\t"
        "orr %[hi], %[hi], r9, lsr #1\n\t"
        /* r5 = (d >> 1) + 1: comparison threshold for each quotient bit. */
        "lsr r5, %[d], #1\n\t"
        "add r5, r5, #1\n\t"
        "mov r6, %[lo]\n\t"
        "mov r9, %[hi]\n\t"
        /* Do top 32 */
        /* Branch-free conditional subtract: r8 is all-ones when the
         * remainder >= threshold; shifts that mask into the quotient bit
         * and the subtraction. */
        "subs r8, r5, r9\n\t"
        "sbc r8, r8, r8\n\t"
        "add %[r], %[r], %[r]\n\t"
        "sub %[r], %[r], r8\n\t"
        "and r8, r8, r5\n\t"
        "subs r9, r9, r8\n\t"
        /* Next 30 bits */
        "mov r4, #29\n\t"
        "\n1:\n\t"
        /* Shift the next dividend bit into the working remainder r9. */
        "movs r6, r6, lsl #1\n\t"
        "adc r9, r9, r9\n\t"
        "subs r8, r5, r9\n\t"
        "sbc r8, r8, r8\n\t"
        "add %[r], %[r], %[r]\n\t"
        "sub %[r], %[r], r8\n\t"
        "and r8, r8, r5\n\t"
        "subs r9, r9, r8\n\t"
        "subs r4, r4, #1\n\t"
        "bpl 1b\n\t"
        /* Last quotient bit is always 1 after normalization. */
        "add %[r], %[r], %[r]\n\t"
        "add %[r], %[r], #1\n\t"
        /* Handle difference has hi word > 0. */
        "umull r4, r5, %[r], %[d]\n\t"
        "subs r4, %[lo], r4\n\t"
        "sbc r5, %[hi], r5\n\t"
        "add %[r], %[r], r5\n\t"
        "umull r4, r5, %[r], %[d]\n\t"
        "subs r4, %[lo], r4\n\t"
        "sbc r5, %[hi], r5\n\t"
        "add %[r], %[r], r5\n\t"
        /* Add 1 to result if bottom half of difference is >= d. */
        "mul r4, %[r], %[d]\n\t"
        "subs r4, %[lo], r4\n\t"
        "subs r9, %[d], r4\n\t"
        "sbc r8, r8, r8\n\t"
        "sub %[r], %[r], r8\n\t"
        "subs r9, r9, #1\n\t"
        "sbc r8, r8, r8\n\t"
        "sub %[r], %[r], r8\n\t"
        : [r] "+r" (r), [hi] "+r" (hi), [lo] "+r" (lo), [d] "+r" (d)
#if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
        : [t] "r" (debruijn32), [m] "m" (debruijn32_mul)
#else
        :
#endif
        /* r7 is deliberately not used: it can be the frame pointer. */
        : "r4", "r5", "r6", "r8", "r9"
    );
    return r;
}
  1234. #else
  1235. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  1236. *
  1237. * Using udiv instruction on arm32
 * Constant time only if udiv has data-independent latency on the target
 * core (NOTE(review): udiv latency varies on some implementations).
  1239. *
  1240. * @param [in] hi SP integer digit. High digit of the dividend.
  1241. * @param [in] lo SP integer digit. Lower digit of the dividend.
  1242. * @param [in] d SP integer digit. Number to divide by.
  1243. * @return The division result.
  1244. */
static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
    sp_int_digit d)
{
    /* Same scheme as the Aarch64 version, 16 bits at a time:
     * normalize so the top 8 bits of d are non-zero, then estimate the
     * quotient with udiv by (d >> 16) + 1, subtracting q*d each round,
     * and finish with an exact udiv by d. */
    __asm__ __volatile__ (
        /* r3 = 8 if the top 8 bits of d are clear, else 0. */
        "lsrs r3, %[d], #24\n\t"
        "it eq\n\t"
        "moveq r3, #8\n\t"
        "it ne\n\t"
        "movne r3, #0\n\t"
        /* Normalize d and the 64-bit dividend by r3 bits.
         * (lsr by r4=31-r3 then lsr #1 totals 32-r3, avoiding a shift
         * of 32.) */
        "rsb r4, r3, #31\n\t"
        "lsl %[d], %[d], r3\n\t"
        "lsl %[hi], %[hi], r3\n\t"
        "lsr r5, %[lo], r4\n\t"
        "lsl %[lo], %[lo], r3\n\t"
        "orr %[hi], %[hi], r5, lsr #1\n\t"
        /* r5 = (d >> 16) + 1: divisor approximation for estimates. */
        "lsr r5, %[d], 16\n\t"
        "add r5, r5, 1\n\t"
        /* Round 1: top 16 quotient bits (accumulated in r6). */
        "udiv r3, %[hi], r5\n\t"
        "lsl r6, r3, 16\n\t"
        "umull r4, r3, %[d], r6\n\t"
        "subs %[lo], %[lo], r4\n\t"
        "sbc %[hi], %[hi], r3\n\t"
        /* Round 2: refine the high half. */
        "udiv r3, %[hi], r5\n\t"
        "lsl r3, r3, 16\n\t"
        "add r6, r6, r3\n\t"
        "umull r4, r3, %[d], r3\n\t"
        "subs %[lo], %[lo], r4\n\t"
        "sbc %[hi], %[hi], r3\n\t"
        /* Round 3: next 16 bits from the middle of the remainder. */
        "lsr r3, %[lo], 16\n\t"
        "orr r3, r3, %[hi], lsl 16\n\t"
        "udiv r3, r3, r5\n\t"
        "add r6, r6, r3\n\t"
        "umull r4, r3, %[d], r3\n\t"
        "subs %[lo], %[lo], r4\n\t"
        "sbc %[hi], %[hi], r3\n\t"
        /* Round 4: one more refinement of the low half. */
        "lsr r3, %[lo], 16\n\t"
        "orr r3, r3, %[hi], lsl 16\n\t"
        "udiv r3, r3, r5\n\t"
        "add r6, r6, r3\n\t"
        "mul r4, %[d], r3\n\t"
        "sub %[lo], %[lo], r4\n\t"
        /* Final exact division of the small remainder. */
        "udiv r3, %[lo], %[d]\n\t"
        "add %[hi], r6, r3\n\t"
        : [hi] "+r" (hi), [lo] "+r" (lo), [d] "+r" (d)
        :
        : "r3", "r4", "r5", "r6"
    );
    return hi;
}
  1294. #endif
  1295. #define SP_ASM_DIV_WORD
  1296. #endif
  1297. #define SP_INT_ASM_AVAILABLE
  1298. #endif /* (WOLFSSL_SP_ARM32 || ARM_CORTEX_M) && SP_WORD_SIZE == 32 */
  1299. #if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
  1300. /*
  1301. * CPU: ARM Thumb (like Cortex-M0)
  1302. */
  1303. /* Compile with -fomit-frame-pointer, or similar, if compiler complains about
  1304. * usage of register 'r7'.
  1305. */
  1306. #if defined(__clang__)
/* Multiply va by vb and store double size result in: vh | vl
 * Thumb-1 (Cortex-M0) has only a 32x32->32 muls, so the 64-bit product is
 * built from four 16x16 partial products (schoolbook):
 *     vh|vl = al*bl + ((al*bh + ah*bl) << 16) + (ah*bh << 32).
 * "l" constraints keep operands in low registers as muls requires. */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth %[l], %[b] \n\t" \
        "muls %[l], r6 \n\t" \
        /* al * bh */ \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r6, r4 \n\t" \
        "lsrs %[h], r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "muls r4, r6 \n\t" \
        "adds %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "muls r6, r4 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r4 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
  1337. /* Multiply va by vb and store double size result in: vo | vh | vl */
  1338. #define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
  1339. __asm__ __volatile__ ( \
  1340. /* al * bl */ \
  1341. "uxth r6, %[a] \n\t" \
  1342. "uxth %[l], %[b] \n\t" \
  1343. "muls %[l], r6 \n\t" \
  1344. /* al * bh */ \
  1345. "lsrs r7, %[b], #16 \n\t" \
  1346. "muls r6, r7 \n\t" \
  1347. "lsrs %[h], r6, #16 \n\t" \
  1348. "lsls r6, r6, #16 \n\t" \
  1349. "adds %[l], %[l], r6 \n\t" \
  1350. "movs %[o], #0 \n\t" \
  1351. "adcs %[h], %[o] \n\t" \
  1352. /* ah * bh */ \
  1353. "lsrs r6, %[a], #16 \n\t" \
  1354. "muls r7, r6 \n\t" \
  1355. "adds %[h], %[h], r7 \n\t" \
  1356. /* ah * bl */ \
  1357. "uxth r7, %[b] \n\t" \
  1358. "muls r6, r7 \n\t" \
  1359. "lsrs r7, r6, #16 \n\t" \
  1360. "lsls r6, r6, #16 \n\t" \
  1361. "adds %[l], %[l], r6 \n\t" \
  1362. "adcs %[h], r7 \n\t" \
  1363. : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
  1364. : [a] "l" (va), [b] "l" (vb) \
  1365. : "r6", "r7", "cc" \
  1366. )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result into: vo | vh | vl
 * Larger/faster variant: r5 stays 0 throughout as the carry-in constant.
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result into: vo | vh | vl
 * Small variant: only r5 and r6 are used as scratch, so r5 doubles as both
 * partial-product register and carry-in constant and must be re-zeroed
 * ("movs r5, #0") before each adcs into vo.  Clobbers r5, r6 and flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "muls r5, r6 \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r6, r5 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r5, r6 \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "muls r6, r5 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "cc" \
    )
#endif
/* Multiply va by vb and add double size result into: vh | vl
 * Two-word accumulate only: carries out of vh are discarded, so the caller
 * must know the sum fits in 64 bits.  Clobbers r4, r5, r6 and flags.
 */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r4, %[b] \n\t" \
        "muls r4, r6 \n\t" \
        "adds %[l], %[l], r4 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        /* al * bh */ \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r6, r4 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r4 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r4, r6 \n\t" \
        "adds %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "muls r6, r4 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r4 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Each partial product is added twice (used for the 2*a[i]*a[j] terms of
 * squaring / symmetric products).  r5 stays 0 as the carry-in constant.
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Small variant: r7 is not available, so va's register is borrowed as the
 * constant 0 after saving the value in high register r8; the original va is
 * restored from r8 when its halves are needed and again before exit.
 * Clobbers r5, r6, r8 and the condition flags.
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movs r8, %[a] \n\t" \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "muls r5, r6 \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "movs %[a], #0 \n\t" \
        "adcs %[h], %[a] \n\t" \
        "adcs %[o], %[a] \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "adcs %[h], %[a] \n\t" \
        "adcs %[o], %[a] \n\t" \
        /* al * bh */ \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r6, r5 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], %[a] \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], %[a] \n\t" \
        /* ah * bh */ \
        "movs %[a], r8 \n\t" \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r5, r6 \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "movs %[a], #0 \n\t" \
        "adcs %[o], %[a] \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "adcs %[o], %[a] \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "muls r6, r5 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], %[a] \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adcs %[o], %[a] \n\t" \
        "movs %[a], r8 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r8", "cc" \
    )
#endif
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 * (so the al*bl and first al*bh additions skip the adcs into vo).
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "adcs %[h], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "adcs %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
/* Square va and store double size result in: vh | vl
 * Squaring needs only three partials: al*al, ah*ah and 2*al*ah (the cross
 * term is doubled via shift-by-17/shift-by-15 instead of two adds).
 * Clobbers r5, r6 and the condition flags.
 */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsrs r5, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        "mov %[l], r6 \n\t" \
        "mov %[h], r5 \n\t" \
        /* al * al */ \
        "muls %[l], %[l] \n\t" \
        /* ah * ah */ \
        "muls %[h], %[h] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r5 \n\t" \
        "lsrs r5, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r5 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va) \
        : "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vo | vh | vl
 * The halves of va are re-extracted for the cross term because r4/r6 were
 * consumed by the squares.  Clobbers r4, r5, r6 and the condition flags.
 */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "lsrs r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "muls r6, r6 \n\t" \
        /* ah * ah */ \
        "muls r4, r4 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r4 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], r5 \n\t" \
        "lsrs r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r4 \n\t" \
        "lsrs r4, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r4 \n\t" \
        "adcs %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va) \
        : "r4", "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vh | vl
 * Two-word accumulate: carries out of vh are discarded.
 * Clobbers r6, r7 and the condition flags.
 */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsrs r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "muls r6, r6 \n\t" \
        /* ah * ah */ \
        "muls r7, r7 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        "lsrs r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r7 \n\t" \
        "lsrs r7, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], r7 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r6", "r7", "cc" \
    )
/* Add va into: vh | vl
 * 32-bit add with carry propagated into vh.  Clobbers r5 and flags.
 */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Sub va from: vh | vl
 * 32-bit subtract with borrow propagated into vh.  Clobbers r5 and flags.
 */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subs %[l], %[l], %[a] \n\t" \
        "movs r5, #0 \n\t" \
        "sbcs %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl
 * Triple-word addition performed twice; no scratch registers needed.
 * Clobbers only the condition flags.
 */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[b] \n\t" \
        "adcs %[o], %[c] \n\t" \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[b] \n\t" \
        "adcs %[o], %[c] \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb), [c] "l" (vc) \
        : "cc" \
    )
#elif defined(WOLFSSL_KEIL)
/* Multiply va by vb and store double size result in: vh | vl
 * Keil variant: same algorithm as the clang version but with three-operand
 * muls/adcs syntax as required by the Keil assembler.
 * Clobbers r4, r5, r6 and the condition flags.
 */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth %[l], %[b] \n\t" \
        "muls %[l], r6, %[l] \n\t" \
        /* al * bh */ \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r6, r4, r6 \n\t" \
        "lsrs %[h], r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "muls r4, r6, r4 \n\t" \
        "adds %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "muls r6, r4, r6 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r4 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * vo is zeroed and used as the carry-in constant 0.
 * Clobbers r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth %[l], %[b] \n\t" \
        "muls %[l], r6, %[l] \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs %[h], r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "movs %[o], #0 \n\t" \
        "adcs %[h], %[h], %[o] \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r6", "r7", "cc" \
    )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result into: vo | vh | vl
 * Keil three-operand syntax; r5 stays 0 as the carry-in constant.
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result into: vo | vh | vl
 * Small variant: r5 doubles as partial-product register and carry-in
 * constant, so it is re-zeroed before each adcs into vo.
 * Clobbers r5, r6 and the condition flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "muls r5, r6, r5 \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r6, r5, r6 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r5, r6, r5 \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "muls r6, r5, r6 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "cc" \
    )
#endif
/* Multiply va by vb and add double size result into: vh | vl
 * Two-word accumulate only: carries out of vh are discarded.
 * Clobbers r4, r5, r6 and the condition flags.
 */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r4, %[b] \n\t" \
        "muls r4, r6, r4 \n\t" \
        "adds %[l], %[l], r4 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        /* al * bh */ \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r6, r4, r6 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r4 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r4, %[b], #16 \n\t" \
        "muls r4, r6, r4 \n\t" \
        "adds %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "muls r6, r4, r6 \n\t" \
        "lsrs r4, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r4 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Keil three-operand syntax; each partial product is added twice.
 * r5 stays 0 as the carry-in constant.  Clobbers r5, r6, r7 and flags.
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Small variant: va's register is borrowed as the constant 0 after saving
 * the value in high register r8; it is restored when the high half of va
 * is needed and again before exit.  Clobbers r5, r6, r8 and flags.
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "movs r8, %[a] \n\t" \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "muls r5, r6, r5 \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "movs %[a], #0 \n\t" \
        "adcs %[h], %[h], %[a] \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        "adds %[l], %[l], r5 \n\t" \
        "adcs %[h], %[h], %[a] \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        /* al * bh */ \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r6, r5, r6 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        /* ah * bh */ \
        "movs %[a], r8 \n\t" \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r5, %[b], #16 \n\t" \
        "muls r5, r6, r5 \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "movs %[a], #0 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        "adds %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "muls r6, r5, r6 \n\t" \
        "lsrs r5, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adcs %[o], %[o], %[a] \n\t" \
        "movs %[a], r8 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r8", "cc" \
    )
#endif
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 * (so the al*bl and first al*bh additions skip the adcs into vo).
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        "adds %[l], %[l], r7 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        /* al * bh */ \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsrs r6, %[a], #16 \n\t" \
        "lsrs r7, %[b], #16 \n\t" \
        "muls r7, r6, r7 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #16 \n\t" \
        "lsls r6, r6, #16 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
/* Square va and store double size result in: vh | vl
 * Three partials: al*al, ah*ah and 2*al*ah (doubled via shift-by-17 /
 * shift-by-15).  Keil three-operand syntax.  Clobbers r5, r6 and flags.
 */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsrs r5, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        "mov %[l], r6 \n\t" \
        "mov %[h], r5 \n\t" \
        /* al * al */ \
        "muls %[l], %[l], %[l] \n\t" \
        /* ah * ah */ \
        "muls %[h], %[h], %[h] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r5, r6 \n\t" \
        "lsrs r5, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va) \
        : "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vo | vh | vl
 * The halves of va are re-extracted for the cross term because r4/r6 were
 * consumed by the squares.  Clobbers r4, r5, r6 and the condition flags.
 */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "lsrs r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "muls r6, r6, r6 \n\t" \
        /* ah * ah */ \
        "muls r4, r4, r4 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r4 \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        "lsrs r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r4, r6 \n\t" \
        "lsrs r4, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r4 \n\t" \
        "adcs %[o], %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va) \
        : "r4", "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vh | vl
 * Two-word accumulate: carries out of vh are discarded.
 * Clobbers r6, r7 and the condition flags.
 */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsrs r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "muls r6, r6, r6 \n\t" \
        /* ah * ah */ \
        "muls r7, r7, r7 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        "lsrs r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "muls r6, r7, r6 \n\t" \
        "lsrs r7, r6, #15 \n\t" \
        "lsls r6, r6, #17 \n\t" \
        "adds %[l], %[l], r6 \n\t" \
        "adcs %[h], %[h], r7 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r6", "r7", "cc" \
    )
/* Add va into: vh | vl
 * 32-bit add with carry propagated into vh.  Clobbers r5 and flags.
 */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "movs r5, #0 \n\t" \
        "adcs %[h], %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Sub va from: vh | vl
 * 32-bit subtract with borrow propagated into vh.  Clobbers r5 and flags.
 */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subs %[l], %[l], %[a] \n\t" \
        "movs r5, #0 \n\t" \
        "sbcs %[h], %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl
 * Triple-word addition performed twice; no scratch registers needed.
 * Clobbers only the condition flags.
 */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adcs %[o], %[o], %[c] \n\t" \
        "adds %[l], %[l], %[a] \n\t" \
        "adcs %[h], %[h], %[b] \n\t" \
        "adcs %[o], %[o], %[c] \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb), [c] "l" (vc) \
        : "cc" \
    )
#elif defined(__GNUC__)
/* Multiply va by vb and store double size result in: vh | vl
 * GCC variant: pre-unified (divided) Thumb syntax — mul/add/adc/lsr/lsl
 * without the explicit 's' suffix; on Thumb-1 these still update flags.
 * Clobbers r4, r5, r6 and the condition flags.
 */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth %[l], %[b] \n\t" \
        "mul %[l], r6 \n\t" \
        /* al * bh */ \
        "lsr r4, %[b], #16 \n\t" \
        "mul r6, r4 \n\t" \
        "lsr %[h], r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "mul r4, r6 \n\t" \
        "add %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "mul r6, r4 \n\t" \
        "lsr r4, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r4 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * vo is zeroed and used as the carry-in constant 0.
 * Clobbers r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth %[l], %[b] \n\t" \
        "mul %[l], r6 \n\t" \
        /* al * bh */ \
        "lsr r7, %[b], #16 \n\t" \
        "mul r6, r7 \n\t" \
        "lsr %[h], r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "mov %[o], #0 \n\t" \
        "adc %[h], %[o] \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "mul r7, r6 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r6", "r7", "cc" \
    )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result into: vo | vh | vl
 * GCC divided-syntax variant; r5 stays 0 as the carry-in constant.
 * Clobbers r5, r6, r7 and the condition flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "mul r7, r6 \n\t" \
        "add %[l], %[l], r7 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], r5 \n\t" \
        /* al * bh */ \
        "lsr r7, %[b], #16 \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r7, %[b], #16 \n\t" \
        "mul r7, r6 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result into: vo | vh | vl
 * Small variant: r5 doubles as partial-product register and carry-in
 * constant, so it is re-zeroed ("mov r5, #0") before each adc into vo.
 * Clobbers r5, r6 and the condition flags.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "mul r5, r6 \n\t" \
        "add %[l], %[l], r5 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], r5 \n\t" \
        /* al * bh */ \
        "lsr r5, %[b], #16 \n\t" \
        "mul r6, r5 \n\t" \
        "lsr r5, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r5, %[b], #16 \n\t" \
        "mul r5, r6 \n\t" \
        "add %[h], %[h], r5 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "mul r6, r5 \n\t" \
        "lsr r5, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "cc" \
    )
#endif
/* Multiply va by vb and add double size result into: vh | vl
 *
 * Two-word variant: no carry is propagated out of vh, so the caller must
 * ensure vh | vl cannot overflow from this accumulation.
 * Scratch: r4, r6 (partial products), r5 (zero for the first ADC).
 */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r4, %[b] \n\t" \
        "mul r4, r6 \n\t" \
        "add %[l], %[l], r4 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        /* al * bh */ \
        "lsr r4, %[b], #16 \n\t" \
        "mul r6, r4 \n\t" \
        "lsr r4, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r4 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r4, %[b], #16 \n\t" \
        "mul r4, r6 \n\t" \
        "add %[h], %[h], r4 \n\t" \
        /* ah * bl */ \
        "uxth r4, %[b] \n\t" \
        "mul r6, r4 \n\t" \
        "lsr r4, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r4 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r4", "r5", "r6", "cc" \
    )
#ifndef WOLFSSL_SP_SMALL
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 *
 * Each 16x16 partial product is accumulated twice (used for the
 * cross terms of squaring/multiplication where a[i]*b[j] appears twice).
 * r5 stays zero for carry propagation. Scratch: r5, r6, r7.
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "mul r7, r6 \n\t" \
        "add %[l], %[l], r7 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[l], %[l], r7 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], r5 \n\t" \
        /* al * bh */ \
        "lsr r7, %[b], #16 \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r7, %[b], #16 \n\t" \
        "mul r7, r6 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
#else
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 *
 * Small-code variant with only r5/r6 free: %[a] is saved in high
 * register r8 so %[a]'s low register can double as the zero register
 * for ADC, and is restored from r8 before use and again at the end.
 * NOTE(review): %[a] is an input operand that is written and then
 * restored inside the asm — relies on the final "mov %[a], r8".
 */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mov r8, %[a] \n\t" \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r5, %[b] \n\t" \
        "mul r5, r6 \n\t" \
        "add %[l], %[l], r5 \n\t" \
        "mov %[a], #0 \n\t" \
        "adc %[h], %[a] \n\t" \
        "adc %[o], %[a] \n\t" \
        "add %[l], %[l], r5 \n\t" \
        "adc %[h], %[a] \n\t" \
        "adc %[o], %[a] \n\t" \
        /* al * bh */ \
        "lsr r5, %[b], #16 \n\t" \
        "mul r6, r5 \n\t" \
        "lsr r5, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], %[a] \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], %[a] \n\t" \
        /* ah * bh */ \
        "mov %[a], r8 \n\t" \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r5, %[b], #16 \n\t" \
        "mul r5, r6 \n\t" \
        "add %[h], %[h], r5 \n\t" \
        "mov %[a], #0 \n\t" \
        "adc %[o], %[a] \n\t" \
        "add %[h], %[h], r5 \n\t" \
        "adc %[o], %[a] \n\t" \
        /* ah * bl */ \
        "uxth r5, %[b] \n\t" \
        "mul r6, r5 \n\t" \
        "lsr r5, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], %[a] \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        "adc %[o], %[a] \n\t" \
        "mov %[a], r8 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r8", "cc" \
    )
#endif
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 *
 * Because the first doubled accumulation cannot carry out of vh, the
 * ADC into vo is deferred until the second partial product — saving
 * instructions over SP_ASM_MUL_ADD2. r5 stays zero after its first load.
 * Scratch: r5, r6, r7.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        /* al * bl */ \
        "uxth r6, %[a] \n\t" \
        "uxth r7, %[b] \n\t" \
        "mul r7, r6 \n\t" \
        "add %[l], %[l], r7 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        "add %[l], %[l], r7 \n\t" \
        "adc %[h], r5 \n\t" \
        /* al * bh */ \
        "lsr r7, %[b], #16 \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bh */ \
        "lsr r6, %[a], #16 \n\t" \
        "lsr r7, %[b], #16 \n\t" \
        "mul r7, r6 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[h], %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        /* ah * bl */ \
        "uxth r7, %[b] \n\t" \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #16 \n\t" \
        "lsl r6, r6, #16 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        "adc %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb) \
        : "r5", "r6", "r7", "cc" \
    )
/* Square va and store double size result in: vh | vl
 *
 * vl = al*al, vh = ah*ah, then the doubled cross term 2*al*ah is added:
 * the shift pair (lsr #15 / lsl #17) is (al*ah) << 1 split across the
 * two result words. Scratch: r5, r6.
 */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsr r5, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        "mov %[l], r6 \n\t" \
        "mov %[h], r5 \n\t" \
        /* al * al */ \
        "mul %[l], %[l] \n\t" \
        /* ah * ah */ \
        "mul %[h], %[h] \n\t" \
        /* 2 * al * ah */ \
        "mul r6, r5 \n\t" \
        "lsr r5, r6, #15 \n\t" \
        "lsl r6, r6, #17 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r5 \n\t" \
        : [h] "+l" (vh), [l] "+l" (vl) \
        : [a] "l" (va) \
        : "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vo | vh | vl
 *
 * Accumulates al*al into vl, ah*ah into vh, then the doubled cross term
 * (al*ah)<<1 via the lsr #15 / lsl #17 split. r5 holds zero for the
 * carries into vo. Scratch: r4, r5, r6.
 */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "lsr r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "mul r6, r6 \n\t" \
        /* ah * ah */ \
        "mul r4, r4 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r4 \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[o], r5 \n\t" \
        /* reload halves - r4/r6 were consumed by the squarings */ \
        "lsr r4, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "mul r6, r4 \n\t" \
        "lsr r4, r6, #15 \n\t" \
        "lsl r6, r6, #17 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r4 \n\t" \
        "adc %[o], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va) \
        : "r4", "r5", "r6", "cc" \
    )
/* Square va and add double size result into: vh | vl
 *
 * Two-word variant of SP_ASM_SQR_ADD: no carry out of vh, so the caller
 * must guarantee vh | vl cannot overflow. Scratch: r6, r7.
 */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "lsr r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* al * al */ \
        "mul r6, r6 \n\t" \
        /* ah * ah */ \
        "mul r7, r7 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        /* reload halves - r6/r7 were consumed by the squarings */ \
        "lsr r7, %[a], #16 \n\t" \
        "uxth r6, %[a] \n\t" \
        /* 2 * al * ah */ \
        "mul r6, r7 \n\t" \
        "lsr r7, r6, #15 \n\t" \
        "lsl r6, r6, #17 \n\t" \
        "add %[l], %[l], r6 \n\t" \
        "adc %[h], r7 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r6", "r7", "cc" \
    )
/* Add va into: vh | vl
 *
 * Single-word add with carry propagated into the high word (r5 = 0 for
 * the ADC since Thumb-1 has no add-with-carry-immediate).
 */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "add %[l], %[l], %[a] \n\t" \
        "mov r5, #0 \n\t" \
        "adc %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Sub va from: vh | vl
 *
 * Single-word subtract with borrow propagated out of the low word
 * into the high word via SBC against a zero register.
 */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "sub %[l], %[l], %[a] \n\t" \
        "mov r5, #0 \n\t" \
        "sbc %[h], r5 \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh) \
        : [a] "l" (va) \
        : "r5", "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl
 *
 * Performs the triple-word addition twice, carries rippling through
 * vh and vo each pass.
 */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "add %[l], %[l], %[a] \n\t" \
        "adc %[h], %[b] \n\t" \
        "adc %[o], %[c] \n\t" \
        "add %[l], %[l], %[a] \n\t" \
        "adc %[h], %[b] \n\t" \
        "adc %[o], %[c] \n\t" \
        : [l] "+l" (vl), [h] "+l" (vh), [o] "+l" (vo) \
        : [a] "l" (va), [b] "l" (vb), [c] "l" (vc) \
        : "cc" \
    )
  2586. #endif
  2587. #ifdef WOLFSSL_SP_DIV_WORD_HALF
  2588. /* Divide a two digit number by a digit number and return. (hi | lo) / d
  2589. *
  2590. * No division instruction used - does operation bit by bit.
  2591. * Constant time.
  2592. *
  2593. * @param [in] hi SP integer digit. High digit of the dividend.
  2594. * @param [in] lo SP integer digit. Lower digit of the dividend.
  2595. * @param [in] d SP integer digit. Number to divide by.
  2596. * @return The division result.
  2597. */
  2598. static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
  2599. sp_int_digit d)
  2600. {
  2601. __asm__ __volatile__ (
  2602. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2603. "lsrs r3, %[d], #24\n\t"
  2604. #else
  2605. "lsr r3, %[d], #24\n\t"
  2606. #endif
  2607. "beq 2%=f\n\t"
  2608. "\n1%=:\n\t"
  2609. "movs r3, #0\n\t"
  2610. "b 3%=f\n\t"
  2611. "\n2%=:\n\t"
  2612. "mov r3, #8\n\t"
  2613. "\n3%=:\n\t"
  2614. "movs r4, #31\n\t"
  2615. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2616. "subs r4, r4, r3\n\t"
  2617. #else
  2618. "sub r4, r4, r3\n\t"
  2619. #endif
  2620. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2621. "lsls %[d], %[d], r3\n\t"
  2622. #else
  2623. "lsl %[d], %[d], r3\n\t"
  2624. #endif
  2625. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2626. "lsls %[hi], %[hi], r3\n\t"
  2627. #else
  2628. "lsl %[hi], %[hi], r3\n\t"
  2629. #endif
  2630. "mov r5, %[lo]\n\t"
  2631. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2632. "lsrs r5, r5, r4\n\t"
  2633. #else
  2634. "lsr r5, r5, r4\n\t"
  2635. #endif
  2636. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2637. "lsls %[lo], %[lo], r3\n\t"
  2638. #else
  2639. "lsl %[lo], %[lo], r3\n\t"
  2640. #endif
  2641. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2642. "lsrs r5, r5, #1\n\t"
  2643. #else
  2644. "lsr r5, r5, #1\n\t"
  2645. #endif
  2646. #if defined(WOLFSSL_KEIL)
  2647. "orrs %[hi], %[hi], r5\n\t"
  2648. #elif defined(__clang__)
  2649. "orrs %[hi], r5\n\t"
  2650. #else
  2651. "orr %[hi], r5\n\t"
  2652. #endif
  2653. "movs r3, #0\n\t"
  2654. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2655. "lsrs r5, %[d], #1\n\t"
  2656. #else
  2657. "lsr r5, %[d], #1\n\t"
  2658. #endif
  2659. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2660. "adds r5, r5, #1\n\t"
  2661. #else
  2662. "add r5, r5, #1\n\t"
  2663. #endif
  2664. "mov r8, %[lo]\n\t"
  2665. "mov r9, %[hi]\n\t"
  2666. /* Do top 32 */
  2667. "movs r6, r5\n\t"
  2668. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2669. "subs r6, r6, %[hi]\n\t"
  2670. #else
  2671. "sub r6, r6, %[hi]\n\t"
  2672. #endif
  2673. #ifdef WOLFSSL_KEIL
  2674. "sbcs r6, r6, r6\n\t"
  2675. #elif defined(__clang__)
  2676. "sbcs r6, r6\n\t"
  2677. #else
  2678. "sbc r6, r6\n\t"
  2679. #endif
  2680. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2681. "adds r3, r3, r3\n\t"
  2682. #else
  2683. "add r3, r3, r3\n\t"
  2684. #endif
  2685. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2686. "subs r3, r3, r6\n\t"
  2687. #else
  2688. "sub r3, r3, r6\n\t"
  2689. #endif
  2690. #ifdef WOLFSSL_KEIL
  2691. "ands r6, r6, r5\n\t"
  2692. #elif defined(__clang__)
  2693. "ands r6, r5\n\t"
  2694. #else
  2695. "and r6, r5\n\t"
  2696. #endif
  2697. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2698. "subs %[hi], %[hi], r6\n\t"
  2699. #else
  2700. "sub %[hi], %[hi], r6\n\t"
  2701. #endif
  2702. "movs r4, #29\n\t"
  2703. "\n"
  2704. "L_sp_div_word_loop%=:\n\t"
  2705. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2706. "lsls %[lo], %[lo], #1\n\t"
  2707. #else
  2708. "lsl %[lo], %[lo], #1\n\t"
  2709. #endif
  2710. #ifdef WOLFSSL_KEIL
  2711. "adcs %[hi], %[hi], %[hi]\n\t"
  2712. #elif defined(__clang__)
  2713. "adcs %[hi], %[hi]\n\t"
  2714. #else
  2715. "adc %[hi], %[hi]\n\t"
  2716. #endif
  2717. "movs r6, r5\n\t"
  2718. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2719. "subs r6, r6, %[hi]\n\t"
  2720. #else
  2721. "sub r6, r6, %[hi]\n\t"
  2722. #endif
  2723. #ifdef WOLFSSL_KEIL
  2724. "sbcs r6, r6, r6\n\t"
  2725. #elif defined(__clang__)
  2726. "sbcs r6, r6\n\t"
  2727. #else
  2728. "sbc r6, r6\n\t"
  2729. #endif
  2730. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2731. "adds r3, r3, r3\n\t"
  2732. #else
  2733. "add r3, r3, r3\n\t"
  2734. #endif
  2735. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2736. "subs r3, r3, r6\n\t"
  2737. #else
  2738. "sub r3, r3, r6\n\t"
  2739. #endif
  2740. #ifdef WOLFSSL_KEIL
  2741. "ands r6, r6, r5\n\t"
  2742. #elif defined(__clang__)
  2743. "ands r6, r5\n\t"
  2744. #else
  2745. "and r6, r5\n\t"
  2746. #endif
  2747. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2748. "subs %[hi], %[hi], r6\n\t"
  2749. #else
  2750. "sub %[hi], %[hi], r6\n\t"
  2751. #endif
  2752. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2753. "subs r4, r4, #1\n\t"
  2754. #else
  2755. "sub r4, r4, #1\n\t"
  2756. #endif
  2757. "bpl L_sp_div_word_loop%=\n\t"
  2758. "movs r7, #0\n\t"
  2759. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2760. "adds r3, r3, r3\n\t"
  2761. #else
  2762. "add r3, r3, r3\n\t"
  2763. #endif
  2764. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2765. "adds r3, r3, #1\n\t"
  2766. #else
  2767. "add r3, r3, #1\n\t"
  2768. #endif
  2769. /* r * d - Start */
  2770. "uxth %[hi], r3\n\t"
  2771. "uxth r4, %[d]\n\t"
  2772. #ifdef WOLFSSL_KEIL
  2773. "muls r4, %[hi], r4\n\t"
  2774. #elif defined(__clang__)
  2775. "muls r4, %[hi]\n\t"
  2776. #else
  2777. "mul r4, %[hi]\n\t"
  2778. #endif
  2779. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2780. "lsrs r6, %[d], #16\n\t"
  2781. #else
  2782. "lsr r6, %[d], #16\n\t"
  2783. #endif
  2784. #ifdef WOLFSSL_KEIL
  2785. "muls %[hi], r6, %[hi]\n\t"
  2786. #elif defined(__clang__)
  2787. "muls %[hi], r6\n\t"
  2788. #else
  2789. "mul %[hi], r6\n\t"
  2790. #endif
  2791. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2792. "lsrs r5, %[hi], #16\n\t"
  2793. #else
  2794. "lsr r5, %[hi], #16\n\t"
  2795. #endif
  2796. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2797. "lsls %[hi], %[hi], #16\n\t"
  2798. #else
  2799. "lsl %[hi], %[hi], #16\n\t"
  2800. #endif
  2801. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2802. "adds r4, r4, %[hi]\n\t"
  2803. #else
  2804. "add r4, r4, %[hi]\n\t"
  2805. #endif
  2806. #ifdef WOLFSSL_KEIL
  2807. "adcs r5, r5, r7\n\t"
  2808. #elif defined(__clang__)
  2809. "adcs r5, r7\n\t"
  2810. #else
  2811. "adc r5, r7\n\t"
  2812. #endif
  2813. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2814. "lsrs %[hi], r3, #16\n\t"
  2815. #else
  2816. "lsr %[hi], r3, #16\n\t"
  2817. #endif
  2818. #ifdef WOLFSSL_KEIL
  2819. "muls r6, %[hi], r6\n\t"
  2820. #elif defined(__clang__)
  2821. "muls r6, %[hi]\n\t"
  2822. #else
  2823. "mul r6, %[hi]\n\t"
  2824. #endif
  2825. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2826. "adds r5, r5, r6\n\t"
  2827. #else
  2828. "add r5, r5, r6\n\t"
  2829. #endif
  2830. "uxth r6, %[d]\n\t"
  2831. #ifdef WOLFSSL_KEIL
  2832. "muls %[hi], r6, %[hi]\n\t"
  2833. #elif defined(__clang__)
  2834. "muls %[hi], r6\n\t"
  2835. #else
  2836. "mul %[hi], r6\n\t"
  2837. #endif
  2838. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2839. "lsrs r6, %[hi], #16\n\t"
  2840. #else
  2841. "lsr r6, %[hi], #16\n\t"
  2842. #endif
  2843. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2844. "lsls %[hi], %[hi], #16\n\t"
  2845. #else
  2846. "lsl %[hi], %[hi], #16\n\t"
  2847. #endif
  2848. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2849. "adds r4, r4, %[hi]\n\t"
  2850. #else
  2851. "add r4, r4, %[hi]\n\t"
  2852. #endif
  2853. #ifdef WOLFSSL_KEIL
  2854. "adcs r5, r5, r6\n\t"
  2855. #elif defined(__clang__)
  2856. "adcs r5, r6\n\t"
  2857. #else
  2858. "adc r5, r6\n\t"
  2859. #endif
  2860. /* r * d - Done */
  2861. "mov %[hi], r8\n\t"
  2862. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2863. "subs %[hi], %[hi], r4\n\t"
  2864. #else
  2865. "sub %[hi], %[hi], r4\n\t"
  2866. #endif
  2867. "movs r4, %[hi]\n\t"
  2868. "mov %[hi], r9\n\t"
  2869. #ifdef WOLFSSL_KEIL
  2870. "sbcs %[hi], %[hi], r5\n\t"
  2871. #elif defined(__clang__)
  2872. "sbcs %[hi], r5\n\t"
  2873. #else
  2874. "sbc %[hi], r5\n\t"
  2875. #endif
  2876. "movs r5, %[hi]\n\t"
  2877. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2878. "adds r3, r3, r5\n\t"
  2879. #else
  2880. "add r3, r3, r5\n\t"
  2881. #endif
  2882. /* r * d - Start */
  2883. "uxth %[hi], r3\n\t"
  2884. "uxth r4, %[d]\n\t"
  2885. #ifdef WOLFSSL_KEIL
  2886. "muls r4, %[hi], r4\n\t"
  2887. #elif defined(__clang__)
  2888. "muls r4, %[hi]\n\t"
  2889. #else
  2890. "mul r4, %[hi]\n\t"
  2891. #endif
  2892. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2893. "lsrs r6, %[d], #16\n\t"
  2894. #else
  2895. "lsr r6, %[d], #16\n\t"
  2896. #endif
  2897. #ifdef WOLFSSL_KEIL
  2898. "muls %[hi], r6, %[hi]\n\t"
  2899. #elif defined(__clang__)
  2900. "muls %[hi], r6\n\t"
  2901. #else
  2902. "mul %[hi], r6\n\t"
  2903. #endif
  2904. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2905. "lsrs r5, %[hi], #16\n\t"
  2906. #else
  2907. "lsr r5, %[hi], #16\n\t"
  2908. #endif
  2909. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2910. "lsls %[hi], %[hi], #16\n\t"
  2911. #else
  2912. "lsl %[hi], %[hi], #16\n\t"
  2913. #endif
  2914. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2915. "adds r4, r4, %[hi]\n\t"
  2916. #else
  2917. "add r4, r4, %[hi]\n\t"
  2918. #endif
  2919. #ifdef WOLFSSL_KEIL
  2920. "adcs r5, r5, r7\n\t"
  2921. #elif defined(__clang__)
  2922. "adcs r5, r7\n\t"
  2923. #else
  2924. "adc r5, r7\n\t"
  2925. #endif
  2926. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2927. "lsrs %[hi], r3, #16\n\t"
  2928. #else
  2929. "lsr %[hi], r3, #16\n\t"
  2930. #endif
  2931. #ifdef WOLFSSL_KEIL
  2932. "muls r6, %[hi], r6\n\t"
  2933. #elif defined(__clang__)
  2934. "muls r6, %[hi]\n\t"
  2935. #else
  2936. "mul r6, %[hi]\n\t"
  2937. #endif
  2938. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2939. "adds r5, r5, r6\n\t"
  2940. #else
  2941. "add r5, r5, r6\n\t"
  2942. #endif
  2943. "uxth r6, %[d]\n\t"
  2944. #ifdef WOLFSSL_KEIL
  2945. "muls %[hi], r6, %[hi]\n\t"
  2946. #elif defined(__clang__)
  2947. "muls %[hi], r6\n\t"
  2948. #else
  2949. "mul %[hi], r6\n\t"
  2950. #endif
  2951. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2952. "lsrs r6, %[hi], #16\n\t"
  2953. #else
  2954. "lsr r6, %[hi], #16\n\t"
  2955. #endif
  2956. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2957. "lsls %[hi], %[hi], #16\n\t"
  2958. #else
  2959. "lsl %[hi], %[hi], #16\n\t"
  2960. #endif
  2961. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2962. "adds r4, r4, %[hi]\n\t"
  2963. #else
  2964. "add r4, r4, %[hi]\n\t"
  2965. #endif
  2966. #ifdef WOLFSSL_KEIL
  2967. "adcs r5, r5, r6\n\t"
  2968. #elif defined(__clang__)
  2969. "adcs r5, r6\n\t"
  2970. #else
  2971. "adc r5, r6\n\t"
  2972. #endif
  2973. /* r * d - Done */
  2974. "mov %[hi], r8\n\t"
  2975. "mov r6, r9\n\t"
  2976. #ifdef WOLFSSL_KEIL
  2977. "subs r4, %[hi], r4\n\t"
  2978. #else
  2979. #ifdef __clang__
  2980. "subs r4, %[hi], r4\n\t"
  2981. #else
  2982. "sub r4, %[hi], r4\n\t"
  2983. #endif
  2984. #endif
  2985. #ifdef WOLFSSL_KEIL
  2986. "sbcs r6, r6, r5\n\t"
  2987. #elif defined(__clang__)
  2988. "sbcs r6, r5\n\t"
  2989. #else
  2990. "sbc r6, r5\n\t"
  2991. #endif
  2992. "movs r5, r6\n\t"
  2993. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  2994. "adds r3, r3, r5\n\t"
  2995. #else
  2996. "add r3, r3, r5\n\t"
  2997. #endif
  2998. /* r * d - Start */
  2999. "uxth %[hi], r3\n\t"
  3000. "uxth r4, %[d]\n\t"
  3001. #ifdef WOLFSSL_KEIL
  3002. "muls r4, %[hi], r4\n\t"
  3003. #elif defined(__clang__)
  3004. "muls r4, %[hi]\n\t"
  3005. #else
  3006. "mul r4, %[hi]\n\t"
  3007. #endif
  3008. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3009. "lsrs r6, %[d], #16\n\t"
  3010. #else
  3011. "lsr r6, %[d], #16\n\t"
  3012. #endif
  3013. #ifdef WOLFSSL_KEIL
  3014. "muls %[hi], r6, %[hi]\n\t"
  3015. #elif defined(__clang__)
  3016. "muls %[hi], r6\n\t"
  3017. #else
  3018. "mul %[hi], r6\n\t"
  3019. #endif
  3020. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3021. "lsrs r5, %[hi], #16\n\t"
  3022. #else
  3023. "lsr r5, %[hi], #16\n\t"
  3024. #endif
  3025. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3026. "lsls %[hi], %[hi], #16\n\t"
  3027. #else
  3028. "lsl %[hi], %[hi], #16\n\t"
  3029. #endif
  3030. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3031. "adds r4, r4, %[hi]\n\t"
  3032. #else
  3033. "add r4, r4, %[hi]\n\t"
  3034. #endif
  3035. #ifdef WOLFSSL_KEIL
  3036. "adcs r5, r5, r7\n\t"
  3037. #elif defined(__clang__)
  3038. "adcs r5, r7\n\t"
  3039. #else
  3040. "adc r5, r7\n\t"
  3041. #endif
  3042. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3043. "lsrs %[hi], r3, #16\n\t"
  3044. #else
  3045. "lsr %[hi], r3, #16\n\t"
  3046. #endif
  3047. #ifdef WOLFSSL_KEIL
  3048. "muls r6, %[hi], r6\n\t"
  3049. #elif defined(__clang__)
  3050. "muls r6, %[hi]\n\t"
  3051. #else
  3052. "mul r6, %[hi]\n\t"
  3053. #endif
  3054. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3055. "adds r5, r5, r6\n\t"
  3056. #else
  3057. "add r5, r5, r6\n\t"
  3058. #endif
  3059. "uxth r6, %[d]\n\t"
  3060. #ifdef WOLFSSL_KEIL
  3061. "muls %[hi], r6, %[hi]\n\t"
  3062. #elif defined(__clang__)
  3063. "muls %[hi], r6\n\t"
  3064. #else
  3065. "mul %[hi], r6\n\t"
  3066. #endif
  3067. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3068. "lsrs r6, %[hi], #16\n\t"
  3069. #else
  3070. "lsr r6, %[hi], #16\n\t"
  3071. #endif
  3072. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3073. "lsls %[hi], %[hi], #16\n\t"
  3074. #else
  3075. "lsl %[hi], %[hi], #16\n\t"
  3076. #endif
  3077. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3078. "adds r4, r4, %[hi]\n\t"
  3079. #else
  3080. "add r4, r4, %[hi]\n\t"
  3081. #endif
  3082. #ifdef WOLFSSL_KEIL
  3083. "adcs r5, r5, r6\n\t"
  3084. #elif defined(__clang__)
  3085. "adcs r5, r6\n\t"
  3086. #else
  3087. "adc r5, r6\n\t"
  3088. #endif
  3089. /* r * d - Done */
  3090. "mov %[hi], r8\n\t"
  3091. "mov r6, r9\n\t"
  3092. #ifdef WOLFSSL_KEIL
  3093. "subs r4, %[hi], r4\n\t"
  3094. #else
  3095. #ifdef __clang__
  3096. "subs r4, %[hi], r4\n\t"
  3097. #else
  3098. "sub r4, %[hi], r4\n\t"
  3099. #endif
  3100. #endif
  3101. #ifdef WOLFSSL_KEIL
  3102. "sbcs r6, r6, r5\n\t"
  3103. #elif defined(__clang__)
  3104. "sbcs r6, r5\n\t"
  3105. #else
  3106. "sbc r6, r5\n\t"
  3107. #endif
  3108. "movs r5, r6\n\t"
  3109. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3110. "adds r3, r3, r5\n\t"
  3111. #else
  3112. "add r3, r3, r5\n\t"
  3113. #endif
  3114. "movs r6, %[d]\n\t"
  3115. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3116. "subs r6, r6, r4\n\t"
  3117. #else
  3118. "sub r6, r6, r4\n\t"
  3119. #endif
  3120. #ifdef WOLFSSL_KEIL
  3121. "sbcs r6, r6, r6\n\t"
  3122. #elif defined(__clang__)
  3123. "sbcs r6, r6\n\t"
  3124. #else
  3125. "sbc r6, r6\n\t"
  3126. #endif
  3127. #if defined(__clang__) || defined(WOLFSSL_KEIL)
  3128. "subs r3, r3, r6\n\t"
  3129. #else
  3130. "sub r3, r3, r6\n\t"
  3131. #endif
  3132. "movs %[hi], r3\n\t"
  3133. : [hi] "+l" (hi), [lo] "+l" (lo), [d] "+l" (d)
  3134. :
  3135. : "r3", "r4", "r5", "r6", "r7", "r8", "r9"
  3136. );
  3137. return (uint32_t)(size_t)hi;
  3138. }
  3139. #define SP_ASM_DIV_WORD
  3140. #endif /* !WOLFSSL_SP_DIV_WORD_HALF */
  3141. #define SP_INT_ASM_AVAILABLE
  3142. #endif /* WOLFSSL_SP_ARM_THUMB && SP_WORD_SIZE == 32 */
  3143. #if defined(WOLFSSL_SP_PPC64) && SP_WORD_SIZE == 64
  3144. /*
  3145. * CPU: PPC64
  3146. */
/* Multiply va by vb and store double size result in: vh | vl
 *
 * PPC64: mulld gives the low 64 bits, mulhdu the high 64 bits of the
 * unsigned 64x64 product.
 */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mulld %[l], %[a], %[b] \n\t" \
        "mulhdu %[h], %[a], %[b] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va), [b] "r" (vb) \
        : "memory" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 *
 * As SP_ASM_MUL but also zeroes the overflow word vo.
 */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mulhdu %[h], %[a], %[b] \n\t" \
        "mulld %[l], %[a], %[b] \n\t" \
        "li %[o], 0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 *
 * Product lands in scratch r16/r17, then addc/adde/addze ripple the
 * carry through the three accumulator words.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[b] \n\t" \
        "mulhdu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl
 *
 * Two-word variant: no addze, caller guarantees vh | vl cannot overflow.
 */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[b] \n\t" \
        "mulhdu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[b] \n\t" \
        "mulhdu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl
 * (so the first addze into vo can be omitted).
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[b] \n\t" \
        "mulhdu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va) \
    __asm__ __volatile__ ( \
        "mulld %[l], %[a], %[a] \n\t" \
        "mulhdu %[h], %[a], %[a] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va) \
        : "memory" \
    )
/* Square va and add double size result into: vo | vh | vl
 *
 * Square in scratch r16/r17, carry rippled via addc/adde/addze.
 */
#define SP_ASM_SQR_ADD(vl, vh, vo, va) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[a] \n\t" \
        "mulhdu 17, %[a], %[a] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va) \
        : "16", "17", "cc" \
    )
/* Square va and add double size result into: vh | vl
 *
 * Two-word variant: caller guarantees vh | vl cannot overflow.
 */
#define SP_ASM_SQR_ADD_NO(vl, vh, va) \
    __asm__ __volatile__ ( \
        "mulld 16, %[a], %[a] \n\t" \
        "mulhdu 17, %[a], %[a] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "16", "17", "cc" \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va) \
    __asm__ __volatile__ ( \
        "addc %[l], %[l], %[a] \n\t" \
        "addze %[h], %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "cc" \
    )
/* Sub va from: vh | vl
 *
 * subfc/subfe: subtract with borrow through the high word (r16 = 0).
 */
#define SP_ASM_SUBB(vl, vh, va) \
    __asm__ __volatile__ ( \
        "subfc %[l], %[a], %[l] \n\t" \
        "li 16, 0 \n\t" \
        "subfe %[h], 16, %[h] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va) \
        : "16", "cc" \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ ( \
        "addc %[l], %[l], %[a] \n\t" \
        "adde %[h], %[h], %[b] \n\t" \
        "adde %[o], %[o], %[c] \n\t" \
        "addc %[l], %[l], %[a] \n\t" \
        "adde %[h], %[h], %[b] \n\t" \
        "adde %[o], %[o], %[c] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
        : "cc" \
    )
  3284. #define SP_INT_ASM_AVAILABLE
  3285. #endif /* WOLFSSL_SP_PPC64 && SP_WORD_SIZE == 64 */
  3286. #if defined(WOLFSSL_SP_PPC) && SP_WORD_SIZE == 32
  3287. /*
  3288. * CPU: PPC 32-bit
  3289. */
/* Multiply va by vb and store double size result in: vh | vl
 *
 * PPC 32-bit: mullw gives the low 32 bits, mulhwu the high 32 bits of
 * the unsigned 32x32 product.
 */
#define SP_ASM_MUL(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mullw %[l], %[a], %[b] \n\t" \
        "mulhwu %[h], %[a], %[b] \n\t" \
        : [h] "+r" (vh), [l] "+r" (vl) \
        : [a] "r" (va), [b] "r" (vb) \
        : "memory" \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 *
 * As SP_ASM_MUL but also zeroes the overflow word vo.
 */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mulhwu %[h], %[a], %[b] \n\t" \
        "mullw %[l], %[a], %[b] \n\t" \
        "li %[o], 0 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 *
 * Product in scratch r16/r17; addc/adde/addze ripple the carry.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb) \
    __asm__ __volatile__ ( \
        "mullw 16, %[a], %[b] \n\t" \
        "mulhwu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        "addze %[o], %[o] \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
/* Multiply va by vb and add double size result into: vh | vl
 *
 * Two-word variant: caller guarantees vh | vl cannot overflow.
 */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb) \
    __asm__ __volatile__ ( \
        "mullw 16, %[a], %[b] \n\t" \
        "mulhwu 17, %[a], %[b] \n\t" \
        "addc %[l], %[l], 16 \n\t" \
        "adde %[h], %[h], 17 \n\t" \
        : [l] "+r" (vl), [h] "+r" (vh) \
        : [a] "r" (va), [b] "r" (vb) \
        : "16", "17", "cc" \
    )
  3332. /* Multiply va by vb and add double size result twice into: vo | vh | vl */
  3333. #define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb) \
  3334. __asm__ __volatile__ ( \
  3335. "mullw 16, %[a], %[b] \n\t" \
  3336. "mulhwu 17, %[a], %[b] \n\t" \
  3337. "addc %[l], %[l], 16 \n\t" \
  3338. "adde %[h], %[h], 17 \n\t" \
  3339. "addze %[o], %[o] \n\t" \
  3340. "addc %[l], %[l], 16 \n\t" \
  3341. "adde %[h], %[h], 17 \n\t" \
  3342. "addze %[o], %[o] \n\t" \
  3343. : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
  3344. : [a] "r" (va), [b] "r" (vb) \
  3345. : "16", "17", "cc" \
  3346. )
  3347. /* Multiply va by vb and add double size result twice into: vo | vh | vl
  3348. * Assumes first add will not overflow vh | vl
  3349. */
  3350. #define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb) \
  3351. __asm__ __volatile__ ( \
  3352. "mullw 16, %[a], %[b] \n\t" \
  3353. "mulhwu 17, %[a], %[b] \n\t" \
  3354. "addc %[l], %[l], 16 \n\t" \
  3355. "adde %[h], %[h], 17 \n\t" \
  3356. "addc %[l], %[l], 16 \n\t" \
  3357. "adde %[h], %[h], 17 \n\t" \
  3358. "addze %[o], %[o] \n\t" \
  3359. : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
  3360. : [a] "r" (va), [b] "r" (vb) \
  3361. : "16", "17", "cc" \
  3362. )
  3363. /* Square va and store double size result in: vh | vl */
  3364. #define SP_ASM_SQR(vl, vh, va) \
  3365. __asm__ __volatile__ ( \
  3366. "mullw %[l], %[a], %[a] \n\t" \
  3367. "mulhwu %[h], %[a], %[a] \n\t" \
  3368. : [h] "+r" (vh), [l] "+r" (vl) \
  3369. : [a] "r" (va) \
  3370. : "memory" \
  3371. )
  3372. /* Square va and add double size result into: vo | vh | vl */
  3373. #define SP_ASM_SQR_ADD(vl, vh, vo, va) \
  3374. __asm__ __volatile__ ( \
  3375. "mullw 16, %[a], %[a] \n\t" \
  3376. "mulhwu 17, %[a], %[a] \n\t" \
  3377. "addc %[l], %[l], 16 \n\t" \
  3378. "adde %[h], %[h], 17 \n\t" \
  3379. "addze %[o], %[o] \n\t" \
  3380. : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
  3381. : [a] "r" (va) \
  3382. : "16", "17", "cc" \
  3383. )
  3384. /* Square va and add double size result into: vh | vl */
  3385. #define SP_ASM_SQR_ADD_NO(vl, vh, va) \
  3386. __asm__ __volatile__ ( \
  3387. "mullw 16, %[a], %[a] \n\t" \
  3388. "mulhwu 17, %[a], %[a] \n\t" \
  3389. "addc %[l], %[l], 16 \n\t" \
  3390. "adde %[h], %[h], 17 \n\t" \
  3391. : [l] "+r" (vl), [h] "+r" (vh) \
  3392. : [a] "r" (va) \
  3393. : "16", "17", "cc" \
  3394. )
  3395. /* Add va into: vh | vl */
  3396. #define SP_ASM_ADDC(vl, vh, va) \
  3397. __asm__ __volatile__ ( \
  3398. "addc %[l], %[l], %[a] \n\t" \
  3399. "addze %[h], %[h] \n\t" \
  3400. : [l] "+r" (vl), [h] "+r" (vh) \
  3401. : [a] "r" (va) \
  3402. : "cc" \
  3403. )
  3404. /* Sub va from: vh | vl */
  3405. #define SP_ASM_SUBB(vl, vh, va) \
  3406. __asm__ __volatile__ ( \
  3407. "subfc %[l], %[a], %[l] \n\t" \
  3408. "li 16, 0 \n\t" \
  3409. "subfe %[h], 16, %[h] \n\t" \
  3410. : [l] "+r" (vl), [h] "+r" (vh) \
  3411. : [a] "r" (va) \
  3412. : "16", "cc" \
  3413. )
  3414. /* Add two times vc | vb | va into vo | vh | vl */
  3415. #define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
  3416. __asm__ __volatile__ ( \
  3417. "addc %[l], %[l], %[a] \n\t" \
  3418. "adde %[h], %[h], %[b] \n\t" \
  3419. "adde %[o], %[o], %[c] \n\t" \
  3420. "addc %[l], %[l], %[a] \n\t" \
  3421. "adde %[h], %[h], %[b] \n\t" \
  3422. "adde %[o], %[o], %[c] \n\t" \
  3423. : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
  3424. : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
  3425. : "cc" \
  3426. )
  3427. #define SP_INT_ASM_AVAILABLE
  3428. #endif /* WOLFSSL_SP_PPC && SP_WORD_SIZE == 64 */
#if defined(WOLFSSL_SP_MIPS64) && SP_WORD_SIZE == 64
/*
 * CPU: MIPS 64-bit
 *
 * MIPS has no carry flag: after "daddu d, x, y" the carry out is
 * recovered with "sltu t, d, y" (result < addend implies the add
 * wrapped).  Products come from dmultu via the HI/LO registers
 * (mfhi/mflo).  Registers $10-$12 are used as scratch and declared
 * clobbered.
 */
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb)                       \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "memory", "$lo", "$hi"                         \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl.
 * The overflow word vo is zeroed from register $0. */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        "move %[o], $0 \n\t"                             \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$lo", "$hi"                                   \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl.
 * Product in $10 (low) / $11 (high); each sltu recovers a carry and
 * propagates it one word up. */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], $11 \n\t"                     \
        "sltu $12, %[h], $11 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb)                \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $11 \n\t"                     \
        "daddu %[h], %[h], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl.
 * The full three-word add sequence is performed twice on the same
 * $10/$11 product. */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb)              \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], $11 \n\t"                     \
        "sltu $12, %[h], $11 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], $11 \n\t"                     \
        "sltu $12, %[h], $11 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the first add skips
 * the carry into vo.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb)           \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[b] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $11 \n\t"                     \
        "daddu %[h], %[h], $12 \n\t"                     \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], $11 \n\t"                     \
        "sltu $12, %[h], $11 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va)                           \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[a] \n\t"                         \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va)                                   \
        : "memory", "$lo", "$hi"                         \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va)                   \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[a] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], $11 \n\t"                     \
        "sltu $12, %[h], $11 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va)                                   \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va)                    \
    __asm__ __volatile__ (                               \
        "dmultu %[a], %[a] \n\t"                         \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "daddu %[l], %[l], $10 \n\t"                     \
        "sltu $12, %[l], $10 \n\t"                       \
        "daddu %[h], %[h], $11 \n\t"                     \
        "daddu %[h], %[h], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$10", "$11", "$12", "$lo", "$hi"              \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "daddu %[l], %[l], %[a] \n\t"                    \
        "sltu $12, %[l], %[a] \n\t"                      \
        "daddu %[h], %[h], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$12"                                          \
    )
/* Sub va from: vh | vl.
 * Old vl is saved in $12; borrow occurred when the new vl is larger
 * than the old (unsigned wrap), detected with sltu. */
#define SP_ASM_SUBB(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "move $12, %[l] \n\t"                            \
        "dsubu %[l], $12, %[a] \n\t"                     \
        "sltu $12, $12, %[l] \n\t"                       \
        "dsubu %[h], %[h], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$12"                                          \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc)         \
    __asm__ __volatile__ (                               \
        "daddu %[l], %[l], %[a] \n\t"                    \
        "sltu $12, %[l], %[a] \n\t"                      \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], %[b] \n\t"                    \
        "sltu $12, %[h], %[b] \n\t"                      \
        "daddu %[o], %[o], %[c] \n\t"                    \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[l], %[l], %[a] \n\t"                    \
        "sltu $12, %[l], %[a] \n\t"                      \
        "daddu %[h], %[h], $12 \n\t"                     \
        "sltu $12, %[h], $12 \n\t"                       \
        "daddu %[o], %[o], $12 \n\t"                     \
        "daddu %[h], %[h], %[b] \n\t"                    \
        "sltu $12, %[h], %[b] \n\t"                      \
        "daddu %[o], %[o], %[c] \n\t"                    \
        "daddu %[o], %[o], $12 \n\t"                     \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc)       \
        : "$12"                                          \
    )
#define SP_INT_ASM_AVAILABLE
#endif /* WOLFSSL_SP_MIPS64 && SP_WORD_SIZE == 64 */
#if defined(WOLFSSL_SP_MIPS) && SP_WORD_SIZE == 32
/*
 * CPU: MIPS 32-bit
 *
 * Same structure as the MIPS 64-bit section but with 32-bit
 * multu/addu/subu: carries are recovered with sltu (result < addend
 * implies wrap) and products come from HI/LO via mfhi/mflo.
 * Registers $10-$12 are scratch and declared clobbered.
 *
 * NOTE(review): the HI/LO clobbers are spelled "%lo"/"%hi" here but
 * "$lo"/"$hi" in the MIPS64 section above — confirm which spelling
 * the target toolchain accepts.
 */
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb)                       \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "memory", "%lo", "%hi"                         \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl.
 * The overflow word vo is zeroed from register $0. */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        "move %[o], $0 \n\t"                             \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "%lo", "%hi"                                   \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl.
 * Product in $10 (low) / $11 (high); each sltu recovers a carry and
 * propagates it one word up. */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], $11 \n\t"                      \
        "sltu $12, %[h], $11 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb)                \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $11 \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl.
 * The full three-word add sequence is performed twice on the same
 * $10/$11 product. */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb)              \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], $11 \n\t"                      \
        "sltu $12, %[h], $11 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], $11 \n\t"                      \
        "sltu $12, %[h], $11 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the first add skips
 * the carry into vo.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb)           \
    __asm__ __volatile__ (                               \
        "multu %[a], %[b] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $11 \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], $11 \n\t"                      \
        "sltu $12, %[h], $11 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va)                           \
    __asm__ __volatile__ (                               \
        "multu %[a], %[a] \n\t"                          \
        "mflo %[l] \n\t"                                 \
        "mfhi %[h] \n\t"                                 \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va)                                   \
        : "memory", "%lo", "%hi"                         \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va)                   \
    __asm__ __volatile__ (                               \
        "multu %[a], %[a] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], $11 \n\t"                      \
        "sltu $12, %[h], $11 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va)                                   \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va)                    \
    __asm__ __volatile__ (                               \
        "multu %[a], %[a] \n\t"                          \
        "mflo $10 \n\t"                                  \
        "mfhi $11 \n\t"                                  \
        "addu %[l], %[l], $10 \n\t"                      \
        "sltu $12, %[l], $10 \n\t"                       \
        "addu %[h], %[h], $11 \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$10", "$11", "$12", "%lo", "%hi"              \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "addu %[l], %[l], %[a] \n\t"                     \
        "sltu $12, %[l], %[a] \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$12"                                          \
    )
/* Sub va from: vh | vl.
 * Old vl is saved in $12; borrow occurred when the new vl is larger
 * than the old (unsigned wrap), detected with sltu. */
#define SP_ASM_SUBB(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "move $12, %[l] \n\t"                            \
        "subu %[l], $12, %[a] \n\t"                      \
        "sltu $12, $12, %[l] \n\t"                       \
        "subu %[h], %[h], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "$12"                                          \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc)         \
    __asm__ __volatile__ (                               \
        "addu %[l], %[l], %[a] \n\t"                     \
        "sltu $12, %[l], %[a] \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], %[b] \n\t"                     \
        "sltu $12, %[h], %[b] \n\t"                      \
        "addu %[o], %[o], %[c] \n\t"                     \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[l], %[l], %[a] \n\t"                     \
        "sltu $12, %[l], %[a] \n\t"                      \
        "addu %[h], %[h], $12 \n\t"                      \
        "sltu $12, %[h], $12 \n\t"                       \
        "addu %[o], %[o], $12 \n\t"                      \
        "addu %[h], %[h], %[b] \n\t"                     \
        "sltu $12, %[h], %[b] \n\t"                      \
        "addu %[o], %[o], %[c] \n\t"                     \
        "addu %[o], %[o], $12 \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc)       \
        : "$12"                                          \
    )
#define SP_INT_ASM_AVAILABLE
#endif /* WOLFSSL_SP_MIPS && SP_WORD_SIZE == 32 */
#if defined(WOLFSSL_SP_RISCV64) && SP_WORD_SIZE == 64
/*
 * CPU: RISCV 64-bit
 *
 * RISC-V has no flags register: a carry out of "add d, x, y" is
 * recovered with "sltu t, d, y" (result < addend implies wrap).
 * mul/mulhu produce the low/high product halves.  Registers a5-a7
 * are used as scratch and declared clobbered.
 */
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb)                       \
    __asm__ __volatile__ (                               \
        "mul %[l], %[a], %[b] \n\t"                      \
        "mulhu %[h], %[a], %[b] \n\t"                    \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "memory"                                       \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl.
 * The overflow word vo is zeroed via "add %[o], zero, zero";
 * results go directly to outputs so no clobbers are needed. */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "mulhu %[h], %[a], %[b] \n\t"                    \
        "mul %[l], %[a], %[b] \n\t"                      \
        "add %[o], zero, zero \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        :                                                \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl.
 * Product in a5 (low) / a6 (high); each sltu recovers a carry and
 * propagates it one word up via a7. */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb)                \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl.
 * The full three-word add sequence is performed twice on the same
 * a5/a6 product. */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb)              \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the first add skips
 * the carry into vo.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb)           \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va)                           \
    __asm__ __volatile__ (                               \
        "mul %[l], %[a], %[a] \n\t"                      \
        "mulhu %[h], %[a], %[a] \n\t"                    \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va)                                   \
        : "memory"                                       \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va)                   \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[a] \n\t"                        \
        "mulhu a6, %[a], %[a] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va)                                   \
        : "a5", "a6", "a7"                               \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va)                    \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[a] \n\t"                        \
        "mulhu a6, %[a], %[a] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a5", "a6", "a7"                               \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a7"                                           \
    )
/* Sub va from: vh | vl.
 * Old vl is saved in a7; borrow occurred when the new vl is larger
 * than the old (unsigned wrap), detected with sltu. */
#define SP_ASM_SUBB(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "add a7, %[l], zero \n\t"                        \
        "sub %[l], a7, %[a] \n\t"                        \
        "sltu a7, a7, %[l] \n\t"                         \
        "sub %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a7"                                           \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc)         \
    __asm__ __volatile__ (                               \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], %[b] \n\t"                      \
        "sltu a7, %[h], %[b] \n\t"                       \
        "add %[o], %[o], %[c] \n\t"                      \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], %[b] \n\t"                      \
        "sltu a7, %[h], %[b] \n\t"                       \
        "add %[o], %[o], %[c] \n\t"                      \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc)       \
        : "a7"                                           \
    )
#define SP_INT_ASM_AVAILABLE
#endif /* WOLFSSL_SP_RISCV64 && SP_WORD_SIZE == 64 */
#if defined(WOLFSSL_SP_RISCV32) && SP_WORD_SIZE == 32
/*
 * CPU: RISCV 32-bit
 *
 * Identical instruction sequences to the RISCV 64-bit section (the
 * base add/mul/mulhu/sltu mnemonics operate on the 32-bit XLEN here).
 * Carries are recovered with sltu on the wrapped sum; a5-a7 are
 * scratch and declared clobbered.
 */
/* Multiply va by vb and store double size result in: vh | vl */
#define SP_ASM_MUL(vl, vh, va, vb)                       \
    __asm__ __volatile__ (                               \
        "mul %[l], %[a], %[b] \n\t"                      \
        "mulhu %[h], %[a], %[b] \n\t"                    \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "memory"                                       \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl.
 * The overflow word vo is zeroed via "add %[o], zero, zero";
 * results go directly to outputs so no clobbers are needed. */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "mulhu %[h], %[a], %[b] \n\t"                    \
        "mul %[l], %[a], %[b] \n\t"                      \
        "add %[o], zero, zero \n\t"                      \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        :                                                \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl.
 * Product in a5 (low) / a6 (high); each sltu recovers a carry and
 * propagates it one word up via a7. */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb)               \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb)                \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl.
 * The full three-word add sequence is performed twice on the same
 * a5/a6 product. */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb)              \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the first add skips
 * the carry into vo.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb)           \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[b] \n\t"                        \
        "mulhu a6, %[a], %[b] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb)                     \
        : "a5", "a6", "a7"                               \
    )
/* Square va and store double size result in: vh | vl */
#define SP_ASM_SQR(vl, vh, va)                           \
    __asm__ __volatile__ (                               \
        "mul %[l], %[a], %[a] \n\t"                      \
        "mulhu %[h], %[a], %[a] \n\t"                    \
        : [h] "+r" (vh), [l] "+r" (vl)                   \
        : [a] "r" (va)                                   \
        : "memory"                                       \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va)                   \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[a] \n\t"                        \
        "mulhu a6, %[a], %[a] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], a6 \n\t"                        \
        "sltu a7, %[h], a6 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va)                                   \
        : "a5", "a6", "a7"                               \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va)                    \
    __asm__ __volatile__ (                               \
        "mul a5, %[a], %[a] \n\t"                        \
        "mulhu a6, %[a], %[a] \n\t"                      \
        "add %[l], %[l], a5 \n\t"                        \
        "sltu a7, %[l], a5 \n\t"                         \
        "add %[h], %[h], a6 \n\t"                        \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a5", "a6", "a7"                               \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a7"                                           \
    )
/* Sub va from: vh | vl.
 * Old vl is saved in a7; borrow occurred when the new vl is larger
 * than the old (unsigned wrap), detected with sltu. */
#define SP_ASM_SUBB(vl, vh, va)                          \
    __asm__ __volatile__ (                               \
        "add a7, %[l], zero \n\t"                        \
        "sub %[l], a7, %[a] \n\t"                        \
        "sltu a7, a7, %[l] \n\t"                         \
        "sub %[h], %[h], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh)                   \
        : [a] "r" (va)                                   \
        : "a7"                                           \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc)         \
    __asm__ __volatile__ (                               \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], %[b] \n\t"                      \
        "sltu a7, %[h], %[b] \n\t"                       \
        "add %[o], %[o], %[c] \n\t"                      \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[l], %[l], %[a] \n\t"                      \
        "sltu a7, %[l], %[a] \n\t"                       \
        "add %[h], %[h], a7 \n\t"                        \
        "sltu a7, %[h], a7 \n\t"                         \
        "add %[o], %[o], a7 \n\t"                        \
        "add %[h], %[h], %[b] \n\t"                      \
        "sltu a7, %[h], %[b] \n\t"                       \
        "add %[o], %[o], %[c] \n\t"                      \
        "add %[o], %[o], a7 \n\t"                        \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo)    \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc)       \
        : "a7"                                           \
    )
#define SP_INT_ASM_AVAILABLE
#endif /* WOLFSSL_SP_RISCV32 && SP_WORD_SIZE == 32 */
#if defined(WOLFSSL_SP_S390X) && SP_WORD_SIZE == 64
/*
 * CPU: IBM s390x (was mislabelled "Intel s390x")
 *
 * mlgr multiplies r1 by the operand; the code below reads the low word of
 * the 128-bit product back from r1 and the high word from r0.
 * algr adds setting the carry flag; alcgr adds including the carry, so
 * adding an immediate-zero register (r10) with alcgr propagates the carry
 * into the next word. slgr/slbgr are the subtract/borrow equivalents.
 */

/* Multiply va by vb and store double size result in: vh | vl
 * NOTE(review): the "memory" clobber looks unnecessary here (no memory
 * operands) - confirm before removing.
 */
#define SP_ASM_MUL(vl, vh, va, vb)              \
    __asm__ __volatile__ (                      \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "lgr %[l], %%r1 \n\t"                   \
        "lgr %[h], %%r0 \n\t"                   \
        : [h] "+r" (vh), [l] "+r" (vl)          \
        : [a] "r" (va), [b] "r" (vb)            \
        : "memory", "r0", "r1"                  \
    )
/* Multiply va by vb and store double size result in: vo | vh | vl
 * The overflow word vo is cleared to zero.
 */
#define SP_ASM_MUL_SET(vl, vh, vo, va, vb)      \
    __asm__ __volatile__ (                      \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "lghi %[o], 0 \n\t"                     \
        "lgr %[l], %%r1 \n\t"                   \
        "lgr %[h], %%r0 \n\t"                   \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "=r" (vo) \
        : [a] "r" (va), [b] "r" (vb)            \
        : "r0", "r1"                            \
    )
/* Multiply va by vb and add double size result into: vo | vh | vl
 * r10 holds zero so the final alcgr adds only the carry into vo.
 */
#define SP_ASM_MUL_ADD(vl, vh, vo, va, vb)      \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "alcgr %[o], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb)            \
        : "r0", "r1", "r10", "cc"               \
    )
/* Multiply va by vb and add double size result into: vh | vl */
#define SP_ASM_MUL_ADD_NO(vl, vh, va, vb)       \
    __asm__ __volatile__ (                      \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        : [l] "+r" (vl), [h] "+r" (vh)          \
        : [a] "r" (va), [b] "r" (vb)            \
        : "r0", "r1", "cc"                      \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl */
#define SP_ASM_MUL_ADD2(vl, vh, vo, va, vb)     \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "alcgr %[o], %%r10 \n\t"                \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "alcgr %[o], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb)            \
        : "r0", "r1", "r10", "cc"               \
    )
/* Multiply va by vb and add double size result twice into: vo | vh | vl
 * Assumes first add will not overflow vh | vl, so the carry into vo is
 * only collected once after the second add.
 */
#define SP_ASM_MUL_ADD2_NO(vl, vh, vo, va, vb)  \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %[b] \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "alcgr %[o], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb)            \
        : "r0", "r1", "r10", "cc"               \
    )
/* Square va and store double size result in: vh | vl
 * NOTE(review): "memory" clobber as in SP_ASM_MUL - confirm.
 */
#define SP_ASM_SQR(vl, vh, va)                  \
    __asm__ __volatile__ (                      \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %%r1 \n\t"                  \
        "lgr %[l], %%r1 \n\t"                   \
        "lgr %[h], %%r0 \n\t"                   \
        : [h] "+r" (vh), [l] "+r" (vl)          \
        : [a] "r" (va)                          \
        : "memory", "r0", "r1"                  \
    )
/* Square va and add double size result into: vo | vh | vl */
#define SP_ASM_SQR_ADD(vl, vh, vo, va)          \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %%r1 \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        "alcgr %[o], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va)                          \
        : "r0", "r1", "r10", "cc"               \
    )
/* Square va and add double size result into: vh | vl */
#define SP_ASM_SQR_ADD_NO(vl, vh, va)           \
    __asm__ __volatile__ (                      \
        "lgr %%r1, %[a] \n\t"                   \
        "mlgr %%r0, %%r1 \n\t"                  \
        "algr %[l], %%r1 \n\t"                  \
        "alcgr %[h], %%r0 \n\t"                 \
        : [l] "+r" (vl), [h] "+r" (vh)          \
        : [a] "r" (va)                          \
        : "r0", "r1", "cc"                      \
    )
/* Add va into: vh | vl */
#define SP_ASM_ADDC(vl, vh, va)                 \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "algr %[l], %[a] \n\t"                  \
        "alcgr %[h], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh)          \
        : [a] "r" (va)                          \
        : "r10", "cc"                           \
    )
/* Sub va from: vh | vl */
#define SP_ASM_SUBB(vl, vh, va)                 \
    __asm__ __volatile__ (                      \
        "lghi %%r10, 0 \n\t"                    \
        "slgr %[l], %[a] \n\t"                  \
        "slbgr %[h], %%r10 \n\t"                \
        : [l] "+r" (vl), [h] "+r" (vh)         \
        : [a] "r" (va)                          \
        : "r10", "cc"                           \
    )
/* Add two times vc | vb | va into vo | vh | vl */
#define SP_ASM_ADD_DBL_3(vl, vh, vo, va, vb, vc) \
    __asm__ __volatile__ (                      \
        "algr %[l], %[a] \n\t"                  \
        "alcgr %[h], %[b] \n\t"                 \
        "alcgr %[o], %[c] \n\t"                 \
        "algr %[l], %[a] \n\t"                  \
        "alcgr %[h], %[b] \n\t"                 \
        "alcgr %[o], %[c] \n\t"                 \
        : [l] "+r" (vl), [h] "+r" (vh), [o] "+r" (vo) \
        : [a] "r" (va), [b] "r" (vb), [c] "r" (vc) \
        : "cc"                                  \
    )

#define SP_INT_ASM_AVAILABLE

#endif /* WOLFSSL_SP_S390X && SP_WORD_SIZE == 64 */
  4353. #ifdef SP_INT_ASM_AVAILABLE
  4354. #ifndef SP_INT_NO_ASM
  4355. #define SQR_MUL_ASM
  4356. #endif
  4357. #ifndef SP_ASM_ADDC_REG
  4358. #define SP_ASM_ADDC_REG SP_ASM_ADDC
  4359. #endif /* SP_ASM_ADDC_REG */
  4360. #ifndef SP_ASM_SUBB_REG
  4361. #define SP_ASM_SUBB_REG SP_ASM_SUBB
#endif /* SP_ASM_SUBB_REG */
#endif /* SP_INT_ASM_AVAILABLE */
  4364. #endif /* !WOLFSSL_NO_ASM */
  4365. #if (!defined(NO_RSA) && !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || \
  4366. !defined(NO_DSA) || !defined(NO_DH) || \
  4367. (defined(HAVE_ECC) && defined(HAVE_COMP_KEY)) || defined(OPENSSL_EXTRA) || \
  4368. (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_PUBLIC_ONLY))
#ifndef WC_NO_CACHE_RESISTANT
/* Mask of address for constant time operations.
 * Index 0 yields an all-zero mask, index 1 an all-one mask, so an address
 * can be selected by AND/OR arithmetic without a data-dependent branch.
 */
const size_t sp_off_on_addr[2] =
{
    (size_t) 0,
    (size_t)-1
};
#endif
  4377. #endif
  4378. #if defined(WOLFSSL_HAVE_SP_DH) || defined(WOLFSSL_HAVE_SP_RSA)
  4379. #ifdef __cplusplus
  4380. extern "C" {
  4381. #endif
  4382. /* Modular exponentiation implementations using Single Precision. */
  4383. WOLFSSL_LOCAL int sp_ModExp_1024(sp_int* base, sp_int* exp, sp_int* mod,
  4384. sp_int* res);
  4385. WOLFSSL_LOCAL int sp_ModExp_1536(sp_int* base, sp_int* exp, sp_int* mod,
  4386. sp_int* res);
  4387. WOLFSSL_LOCAL int sp_ModExp_2048(sp_int* base, sp_int* exp, sp_int* mod,
  4388. sp_int* res);
  4389. WOLFSSL_LOCAL int sp_ModExp_3072(sp_int* base, sp_int* exp, sp_int* mod,
  4390. sp_int* res);
  4391. WOLFSSL_LOCAL int sp_ModExp_4096(sp_int* base, sp_int* exp, sp_int* mod,
  4392. sp_int* res);
  4393. #ifdef __cplusplus
  4394. } /* extern "C" */
  4395. #endif
  4396. #endif /* WOLFSSL_HAVE_SP_DH || WOLFSSL_HAVE_SP_RSA */
  4397. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
  4398. defined(OPENSSL_ALL)
  4399. static int _sp_mont_red(sp_int* a, const sp_int* m, sp_int_digit mp);
  4400. #endif
  4401. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
  4402. defined(WOLFCRYPT_HAVE_ECCSI) || defined(WOLFCRYPT_HAVE_SAKKE) || \
  4403. defined(OPENSSL_ALL)
  4404. static void _sp_mont_setup(const sp_int* m, sp_int_digit* rho);
  4405. #endif
  4406. /* Determine when mp_add_d is required. */
  4407. #if !defined(NO_PWDBASED) || defined(WOLFSSL_KEY_GEN) || !defined(NO_DH) || \
  4408. !defined(NO_DSA) || \
  4409. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  4410. defined(OPENSSL_EXTRA)
  4411. #define WOLFSSL_SP_ADD_D
  4412. #endif
  4413. /* Determine when mp_sub_d is required. */
  4414. #if (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  4415. !defined(NO_DH) || defined(HAVE_ECC) || !defined(NO_DSA)
  4416. #define WOLFSSL_SP_SUB_D
  4417. #endif
  4418. /* Determine when mp_read_radix with a radix of 10 is required. */
  4419. #if defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_RSA) && \
  4420. !defined(WOLFSSL_RSA_VERIFY_ONLY)
  4421. #define WOLFSSL_SP_READ_RADIX_10
  4422. #endif
  4423. /* Determine when mp_invmod is required. */
  4424. #if defined(HAVE_ECC) || !defined(NO_DSA) || defined(OPENSSL_EXTRA) || \
  4425. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  4426. !defined(WOLFSSL_RSA_PUBLIC_ONLY))
  4427. #define WOLFSSL_SP_INVMOD
  4428. #endif
  4429. /* Determine when mp_invmod_mont_ct is required. */
  4430. #if defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC)
  4431. #define WOLFSSL_SP_INVMOD_MONT_CT
  4432. #endif
  4433. /* Determine when mp_prime_gen is required. */
  4434. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  4435. !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || !defined(NO_DH) || \
  4436. (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN))
  4437. #define WOLFSSL_SP_PRIME_GEN
  4438. #endif
  4439. /* Set the multi-precision number to zero.
  4440. *
  4441. * Assumes a is not NULL.
  4442. *
  4443. * @param [out] a SP integer to set to zero.
  4444. */
  4445. static void _sp_zero(sp_int* a)
  4446. {
  4447. sp_int_minimal* am = (sp_int_minimal *)a;
  4448. am->used = 0;
  4449. am->dp[0] = 0;
  4450. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4451. am->sign = MP_ZPOS;
  4452. #endif
  4453. }
/* Initialize the multi-precision number to be zero with a given max size.
 *
 * Assumes a is not NULL.
 *
 * @param [out] a     SP integer.
 * @param [in]  size  Number of words to say are available.
 */
static void _sp_init_size(sp_int* a, int size)
{
    /* NOTE(review): 'volatile' looks like a deliberate compiler workaround -
     * confirm before removing. */
    volatile sp_int_minimal* am = (sp_int_minimal *)a;

#ifdef HAVE_WOLF_BIGINT
    /* Initialize the wolf bigint raw-data holder as well. */
    wc_bigint_init((struct WC_BIGINT*)&am->raw);
#endif
    /* Value starts at zero. */
    _sp_zero((sp_int*)am);

    am->size = size;
}
  4468. /* Initialize the multi-precision number to be zero with a given max size.
  4469. *
  4470. * @param [out] a SP integer.
  4471. * @param [in] size Number of words to say are available.
  4472. *
  4473. * @return MP_OKAY on success.
  4474. * @return MP_VAL when a is NULL.
  4475. */
  4476. int sp_init_size(sp_int* a, int size)
  4477. {
  4478. int err = MP_OKAY;
  4479. /* Validate parameters. Don't use size more than max compiled. */
  4480. if ((a == NULL) || ((size <= 0) || (size > SP_INT_DIGITS))) {
  4481. err = MP_VAL;
  4482. }
  4483. if (err == MP_OKAY) {
  4484. _sp_init_size(a, size);
  4485. }
  4486. return err;
  4487. }
  4488. /* Initialize the multi-precision number to be zero.
  4489. *
  4490. * @param [out] a SP integer.
  4491. *
  4492. * @return MP_OKAY on success.
  4493. * @return MP_VAL when a is NULL.
  4494. */
  4495. int sp_init(sp_int* a)
  4496. {
  4497. int err = MP_OKAY;
  4498. /* Validate parameter. */
  4499. if (a == NULL) {
  4500. err = MP_VAL;
  4501. }
  4502. else {
  4503. /* Assume complete sp_int with SP_INT_DIGITS digits. */
  4504. _sp_init_size(a, SP_INT_DIGITS);
  4505. }
  4506. return err;
  4507. }
  4508. #if !defined(WOLFSSL_RSA_PUBLIC_ONLY) || !defined(NO_DH) || defined(HAVE_ECC)
  4509. /* Initialize up to six multi-precision numbers to be zero.
  4510. *
  4511. * @param [out] n1 SP integer.
  4512. * @param [out] n2 SP integer.
  4513. * @param [out] n3 SP integer.
  4514. * @param [out] n4 SP integer.
  4515. * @param [out] n5 SP integer.
  4516. * @param [out] n6 SP integer.
  4517. *
  4518. * @return MP_OKAY on success.
  4519. */
  4520. int sp_init_multi(sp_int* n1, sp_int* n2, sp_int* n3, sp_int* n4, sp_int* n5,
  4521. sp_int* n6)
  4522. {
  4523. /* Initialize only those pointers that are valid. */
  4524. if (n1 != NULL) {
  4525. _sp_init_size(n1, SP_INT_DIGITS);
  4526. }
  4527. if (n2 != NULL) {
  4528. _sp_init_size(n2, SP_INT_DIGITS);
  4529. }
  4530. if (n3 != NULL) {
  4531. _sp_init_size(n3, SP_INT_DIGITS);
  4532. }
  4533. if (n4 != NULL) {
  4534. _sp_init_size(n4, SP_INT_DIGITS);
  4535. }
  4536. if (n5 != NULL) {
  4537. _sp_init_size(n5, SP_INT_DIGITS);
  4538. }
  4539. if (n6 != NULL) {
  4540. _sp_init_size(n6, SP_INT_DIGITS);
  4541. }
  4542. return MP_OKAY;
  4543. }
  4544. #endif /* !WOLFSSL_RSA_PUBLIC_ONLY || !NO_DH || HAVE_ECC */
/* Free the memory allocated in the multi-precision number.
 *
 * Only releases the optional wolf bigint raw data; the sp_int structure
 * itself is not freed here.
 *
 * @param [in] a  SP integer.
 */
void sp_free(sp_int* a)
{
    if (a != NULL) {
#ifdef HAVE_WOLF_BIGINT
        /* Release the raw-data buffer held alongside the digits. */
        wc_bigint_free(&a->raw);
#endif
    }
}
  4557. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || !defined(NO_DH) || defined(HAVE_ECC)
  4558. /* Grow multi-precision number to be able to hold l digits.
  4559. * This function does nothing as the number of digits is fixed.
  4560. *
  4561. * @param [in,out] a SP integer.
  4562. * @param [in] l Number of digits to grow to.
  4563. *
  4564. * @return MP_OKAY on success
  4565. * @return MP_MEM if the number of digits requested is more than available.
  4566. */
  4567. int sp_grow(sp_int* a, int l)
  4568. {
  4569. int err = MP_OKAY;
  4570. /* Validate parameter. */
  4571. if (a == NULL) {
  4572. err = MP_VAL;
  4573. }
  4574. /* Ensure enough words allocated for grow. */
  4575. if ((err == MP_OKAY) && (l > a->size)) {
  4576. err = MP_MEM;
  4577. }
  4578. if (err == MP_OKAY) {
  4579. int i;
  4580. /* Put in zeros up to the new length. */
  4581. for (i = a->used; i < l; i++) {
  4582. a->dp[i] = 0;
  4583. }
  4584. }
  4585. return err;
  4586. }
  4587. #endif /* !WOLFSSL_RSA_VERIFY_ONLY || !NO_DH || HAVE_ECC */
  4588. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || defined(HAVE_ECC)
  4589. /* Set the multi-precision number to zero.
  4590. *
  4591. * @param [out] a SP integer to set to zero.
  4592. */
  4593. void sp_zero(sp_int* a)
  4594. {
  4595. /* Make an sp_int with valid pointer zero. */
  4596. if (a != NULL) {
  4597. _sp_zero(a);
  4598. }
  4599. }
  4600. #endif /* !WOLFSSL_RSA_VERIFY_ONLY */
  4601. /* Clear the data from the multi-precision number, set to zero and free.
  4602. *
  4603. * @param [out] a SP integer.
  4604. */
  4605. void sp_clear(sp_int* a)
  4606. {
  4607. /* Clear when valid pointer passed in. */
  4608. if (a != NULL) {
  4609. int i;
  4610. /* Only clear the digits being used. */
  4611. for (i = 0; i < a->used; i++) {
  4612. a->dp[i] = 0;
  4613. }
  4614. /* Set back to zero and free. */
  4615. _sp_zero(a);
  4616. sp_free(a);
  4617. }
  4618. }
  4619. #if !defined(NO_RSA) || !defined(NO_DH) || defined(HAVE_ECC) || \
  4620. !defined(NO_DSA) || defined(WOLFSSL_SP_PRIME_GEN)
/* Ensure the data in the multi-precision number is zeroed.
 *
 * Use when security sensitive data needs to be wiped.
 *
 * @param [in] a  SP integer.
 */
void sp_forcezero(sp_int* a)
{
    /* Zeroize when a valid pointer passed in. */
    if (a != NULL) {
        /* Ensure all data zeroized - data not zeroed when used decreases.
         * Wipes the full allocation (size words), not just the used words. */
        ForceZero(a->dp, a->size * SP_WORD_SIZEOF);
        /* Set back to zero. */
#ifdef HAVE_WOLF_BIGINT
        /* Zeroize the raw data as well. */
        wc_bigint_zero(&a->raw);
#endif
        /* Make value zero and free. */
        _sp_zero(a);
        sp_free(a);
    }
}
  4643. #endif /* !WOLFSSL_RSA_VERIFY_ONLY || !NO_DH || HAVE_ECC */
  4644. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  4645. !defined(NO_RSA) || defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY)
  4646. /* Copy value of multi-precision number a into r.
  4647. *
  4648. * @param [in] a SP integer - source.
  4649. * @param [out] r SP integer - destination.
  4650. *
  4651. * @return MP_OKAY on success.
  4652. */
  4653. int sp_copy(const sp_int* a, sp_int* r)
  4654. {
  4655. int err = MP_OKAY;
  4656. /* Validate parameters. */
  4657. if ((a == NULL) || (r == NULL)) {
  4658. err = MP_VAL;
  4659. }
  4660. /* Only copy if different pointers. */
  4661. else if (a != r) {
  4662. /* Validated space in result. */
  4663. if (a->used > r->size) {
  4664. err = MP_VAL;
  4665. }
  4666. else {
  4667. /* Copy words across. */
  4668. if (a->used == 0) {
  4669. r->dp[0] = 0;
  4670. }
  4671. else {
  4672. XMEMCPY(r->dp, a->dp, a->used * SP_WORD_SIZEOF);
  4673. }
  4674. /* Set number of used words in result. */
  4675. r->used = a->used;
  4676. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4677. /* Set sign of result. */
  4678. r->sign = a->sign;
  4679. #endif
  4680. }
  4681. }
  4682. return err;
  4683. }
  4684. #endif
  4685. #if defined(WOLFSSL_SP_MATH_ALL) || (defined(HAVE_ECC) && defined(FP_ECC))
  4686. /* Initializes r and copies in value from a.
  4687. *
  4688. * @param [out] r SP integer - destination.
  4689. * @param [in] a SP integer - source.
  4690. *
  4691. * @return MP_OKAY on success.
  4692. * @return MP_VAL when a or r is NULL.
  4693. */
  4694. int sp_init_copy(sp_int* r, const sp_int* a)
  4695. {
  4696. int err;
  4697. /* Initialize r and copy value in a into it. */
  4698. err = sp_init(r);
  4699. if (err == MP_OKAY) {
  4700. err = sp_copy(a, r);
  4701. }
  4702. return err;
  4703. }
  4704. #endif /* WOLFSSL_SP_MATH_ALL || (HAVE_ECC && FP_ECC) */
  4705. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  4706. !defined(NO_DH) || !defined(NO_DSA)
/* Exchange the values in a and b.
 *
 * Avoid using this API as three copy operations are performed.
 *
 * @param [in,out] a SP integer to swap.
 * @param [in,out] b SP integer to swap.
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when a or b is NULL, or either is too small to hold the
 *         other's value.
 * @return MP_MEM when dynamic memory allocation fails.
 */
int sp_exch(sp_int* a, sp_int* b)
{
    int err = MP_OKAY;
    /* Temporary sized for a's digits; 1 when a is NULL so the declaration
     * stays well-defined. */
    DECL_SP_INT(t, (a != NULL) ? a->used : 1);

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL)) {
        err = MP_VAL;
    }
    /* Check space for a in b and b in a. */
    if ((err == MP_OKAY) && ((a->size < b->used) || (b->size < a->used))) {
        err = MP_VAL;
    }

    /* Create temporary for swapping.
     * NOTE(review): assumes ALLOC_SP_INT only evaluates a->used when err is
     * MP_OKAY (so a NULL 'a' is never dereferenced) - confirm the macro. */
    ALLOC_SP_INT(t, a->used, err, NULL);
    if (err == MP_OKAY) {
        /* Cache allocated size of a and b - the struct copies below
         * overwrite every field including 'size'. */
        int asize = a->size;
        int bsize = b->size;

        /* Copy all of SP int: t <= a, a <= b, b <= t. */
        XMEMCPY(t, a, MP_INT_SIZEOF(a->used));
        XMEMCPY(a, b, MP_INT_SIZEOF(b->used));
        XMEMCPY(b, t, MP_INT_SIZEOF(t->used));

        /* Put back size of a and b. */
        a->size = asize;
        b->size = bsize;
    }

    FREE_SP_INT(t, NULL);
    return err;
}
  4747. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) || !NO_DH ||
  4748. * !NO_DSA */
  4749. #if defined(HAVE_ECC) && defined(ECC_TIMING_RESISTANT) && \
  4750. !defined(WC_NO_CACHE_RESISTANT)
  4751. /* Conditional swap of SP int values in constant time.
  4752. *
  4753. * @param [in] a First SP int to conditionally swap.
  4754. * @param [in] b Second SP int to conditionally swap.
  4755. * @param [in] cnt Count of words to copy.
  4756. * @param [in] swap When value is 1 then swap.
  4757. * @return MP_OKAY on success.
  4758. * @return MP_MEM when dynamic memory allocation fails.
  4759. */
  4760. int sp_cond_swap_ct(sp_int* a, sp_int* b, int cnt, int swap)
  4761. {
  4762. int i;
  4763. int err = MP_OKAY;
  4764. sp_int_digit mask = (sp_int_digit)0 - swap;
  4765. DECL_SP_INT(t, cnt);
  4766. /* Allocate temporary to hold masked xor of a and b. */
  4767. ALLOC_SP_INT(t, cnt, err, NULL);
  4768. if (err == MP_OKAY) {
  4769. /* XOR other fields in sp_int into temp - mask set when swapping. */
  4770. t->used = (int)((a->used ^ b->used) & mask);
  4771. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4772. t->sign = (int)((a->sign ^ b->sign) & mask);
  4773. #endif
  4774. /* XOR requested words into temp - mask set when swapping. */
  4775. for (i = 0; i < cnt; i++) {
  4776. t->dp[i] = (a->dp[i] ^ b->dp[i]) & mask;
  4777. }
  4778. /* XOR temporary - when mask set then result will be b. */
  4779. a->used ^= t->used;
  4780. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4781. a->sign ^= t->sign;
  4782. #endif
  4783. for (i = 0; i < cnt; i++) {
  4784. a->dp[i] ^= t->dp[i];
  4785. }
  4786. /* XOR temporary - when mask set then result will be a. */
  4787. b->used ^= t->used;
  4788. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4789. b->sign ^= b->sign;
  4790. #endif
  4791. for (i = 0; i < cnt; i++) {
  4792. b->dp[i] ^= t->dp[i];
  4793. }
  4794. }
  4795. FREE_SP_INT(t, NULL);
  4796. return err;
  4797. }
  4798. #endif /* HAVE_ECC && ECC_TIMING_RESISTANT && !WC_NO_CACHE_RESISTANT */
  4799. #ifdef WOLFSSL_SP_INT_NEGATIVE
  4800. /* Calculate the absolute value of the multi-precision number.
  4801. *
  4802. * @param [in] a SP integer to calculate absolute value of.
  4803. * @param [out] r SP integer to hold result.
  4804. *
  4805. * @return MP_OKAY on success.
  4806. * @return MP_VAL when a or r is NULL.
  4807. */
  4808. int sp_abs(const sp_int* a, sp_int* r)
  4809. {
  4810. int err;
  4811. /* Copy a into r - copy fails when r is NULL. */
  4812. err = sp_copy(a, r);
  4813. if (err == MP_OKAY) {
  4814. r->sign = MP_ZPOS;
  4815. }
  4816. return err;
  4817. }
  4818. #endif /* WOLFSSL_SP_INT_NEGATIVE */
  4819. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  4820. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY))
  4821. /* Compare absolute value of two multi-precision numbers.
  4822. *
  4823. * @param [in] a SP integer.
  4824. * @param [in] b SP integer.
  4825. *
  4826. * @return MP_GT when a is greater than b.
  4827. * @return MP_LT when a is less than b.
  4828. * @return MP_EQ when a is equals b.
  4829. */
  4830. static int _sp_cmp_abs(const sp_int* a, const sp_int* b)
  4831. {
  4832. int ret = MP_EQ;
  4833. /* Check number of words first. */
  4834. if (a->used > b->used) {
  4835. ret = MP_GT;
  4836. }
  4837. else if (a->used < b->used) {
  4838. ret = MP_LT;
  4839. }
  4840. else {
  4841. int i;
  4842. /* Starting from most significant word, compare words.
  4843. * Stop when different and set comparison return.
  4844. */
  4845. for (i = a->used - 1; i >= 0; i--) {
  4846. if (a->dp[i] > b->dp[i]) {
  4847. ret = MP_GT;
  4848. break;
  4849. }
  4850. else if (a->dp[i] < b->dp[i]) {
  4851. ret = MP_LT;
  4852. break;
  4853. }
  4854. }
  4855. /* If we made to the end then ret is MP_EQ from initialization. */
  4856. }
  4857. return ret;
  4858. }
  4859. #endif
  4860. #if defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_PUBLIC_ONLY)
  4861. /* Compare absolute value of two multi-precision numbers.
  4862. *
  4863. * Pointers are compared such that NULL is less than not NULL.
  4864. *
  4865. * @param [in] a SP integer.
  4866. * @param [in] b SP integer.
  4867. *
  4868. * @return MP_GT when a is greater than b.
  4869. * @return MP_LT when a is less than b.
  4870. * @return MP_EQ when a equals b.
  4871. */
  4872. int sp_cmp_mag(const sp_int* a, const sp_int* b)
  4873. {
  4874. int ret;
  4875. /* Do pointer checks first. Both NULL returns equal. */
  4876. if (a == b) {
  4877. ret = MP_EQ;
  4878. }
  4879. /* Nothing is smaller than something. */
  4880. else if (a == NULL) {
  4881. ret = MP_LT;
  4882. }
  4883. /* Something is larger than nothing. */
  4884. else if (b == NULL) {
  4885. ret = MP_GT;
  4886. }
  4887. else
  4888. {
  4889. /* Compare values - a and b are not NULL. */
  4890. ret = _sp_cmp_abs(a, b);
  4891. }
  4892. return ret;
  4893. }
  4894. #endif
  4895. #if defined(WOLFSSL_SP_MATH_ALL) || defined(HAVE_ECC) || !defined(NO_DSA) || \
  4896. defined(OPENSSL_EXTRA) || !defined(NO_DH) || \
  4897. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY))
/* Compare two multi-precision numbers.
 *
 * Assumes a and b are not NULL.
 *
 * @param [in] a SP integer.
 * @param [in] b SP integer.
 *
 * @return MP_GT when a is greater than b.
 * @return MP_LT when a is less than b.
 * @return MP_EQ when a is equals b.
 */
static int _sp_cmp(const sp_int* a, const sp_int* b)
{
    int ret;

#ifdef WOLFSSL_SP_INT_NEGATIVE
    /* Check sign first - differing signs decide the comparison without
     * looking at magnitudes. */
    if (a->sign > b->sign) {
        ret = MP_LT;
    }
    else if (a->sign < b->sign) {
        ret = MP_GT;
    }
    else /* (a->sign == b->sign) */ {
#endif
        /* Same sign (or signs not compiled in): compare magnitudes. */
        ret = _sp_cmp_abs(a, b);
#ifdef WOLFSSL_SP_INT_NEGATIVE
        if (a->sign == MP_NEG) {
            /* MP_GT = 1, MP_LT = -1, MP_EQ = 0
             * Both negative: larger magnitude is the smaller value, so
             * swap MP_GT and MP_LT results.
             */
            ret = -ret;
        }
    }
#endif

    return ret;
}
  4935. #endif
  4936. #if (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  4937. !defined(NO_DSA) || defined(HAVE_ECC) || !defined(NO_DH) || \
  4938. defined(WOLFSSL_SP_MATH_ALL)
  4939. /* Compare two multi-precision numbers.
  4940. *
  4941. * Pointers are compared such that NULL is less than not NULL.
  4942. *
  4943. * @param [in] a SP integer.
  4944. * @param [in] a SP integer.
  4945. *
  4946. * @return MP_GT when a is greater than b.
  4947. * @return MP_LT when a is less than b.
  4948. * @return MP_EQ when a is equals b.
  4949. */
  4950. int sp_cmp(const sp_int* a, const sp_int* b)
  4951. {
  4952. int ret;
  4953. /* Check pointers first. Both NULL returns equal. */
  4954. if (a == b) {
  4955. ret = MP_EQ;
  4956. }
  4957. /* Nothing is smaller than something. */
  4958. else if (a == NULL) {
  4959. ret = MP_LT;
  4960. }
  4961. /* Something is larger than nothing. */
  4962. else if (b == NULL) {
  4963. ret = MP_GT;
  4964. }
  4965. else
  4966. {
  4967. /* Compare values - a and b are not NULL. */
  4968. ret = _sp_cmp(a, b);
  4969. }
  4970. return ret;
  4971. }
  4972. #endif
  4973. /*************************
  4974. * Bit check/set functions
  4975. *************************/
  4976. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || (defined(WOLFSSL_SP_MATH_ALL) && \
  4977. defined(HAVE_ECC))
  4978. /* Check if a bit is set
  4979. *
  4980. * When a is NULL, result is 0.
  4981. *
  4982. * @param [in] a SP integer.
  4983. * @param [in] b Bit position to check.
  4984. *
  4985. * @return 0 when bit is not set.
  4986. * @return 1 when bit is set.
  4987. */
  4988. int sp_is_bit_set(const sp_int* a, unsigned int b)
  4989. {
  4990. int ret = 0;
  4991. /* Index of word. */
  4992. int i = (int)(b >> SP_WORD_SHIFT);
  4993. /* Check parameters. */
  4994. if ((a != NULL) && (i < a->used)) {
  4995. /* Shift amount to get bit down to index 0. */
  4996. int s = (int)(b & SP_WORD_MASK);
  4997. /* Get and mask bit. */
  4998. ret = (int)((a->dp[i] >> s) & (sp_int_digit)1);
  4999. }
  5000. return ret;
  5001. }
  5002. #endif /* WOLFSSL_RSA_VERIFY_ONLY */
  5003. /* Count the number of bits in the multi-precision number.
  5004. *
  5005. * When a is NULL, result is 0.
  5006. *
  5007. * @param [in] a SP integer.
  5008. *
  5009. * @return Number of bits in the SP integer value.
  5010. */
  5011. int sp_count_bits(const sp_int* a)
  5012. {
  5013. int n = 0;
  5014. /* Check parameter. */
  5015. if (a != NULL) {
  5016. /* Get index of last word. */
  5017. n = a->used - 1;
  5018. /* Don't count leading zeros. */
  5019. while ((n >= 0) && (a->dp[n] == 0)) {
  5020. n--;
  5021. }
  5022. /* -1 indicates SP integer value was zero. */
  5023. if (n < 0) {
  5024. n = 0;
  5025. }
  5026. else {
  5027. sp_int_digit d;
  5028. /* Get the most significant word. */
  5029. d = a->dp[n];
  5030. /* Count of bits up to last word. */
  5031. n *= SP_WORD_SIZE;
  5032. /* Check if top word has more than half the bits set. */
  5033. if (d > SP_HALF_MAX) {
  5034. /* Set count to a full last word. */
  5035. n += SP_WORD_SIZE;
  5036. /* Don't count leading zero bits. */
  5037. while ((d & ((sp_int_digit)1 << (SP_WORD_SIZE - 1))) == 0) {
  5038. n--;
  5039. d <<= 1;
  5040. }
  5041. }
  5042. else {
  5043. /* Add to count until highest set bit is shifted out. */
  5044. while (d != 0) {
  5045. n++;
  5046. d >>= 1;
  5047. }
  5048. }
  5049. }
  5050. }
  5051. return n;
  5052. }
  5053. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  5054. !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || !defined(NO_DH) || \
  5055. (defined(HAVE_ECC) && defined(FP_ECC)) || \
  5056. (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN))
  5057. /* Number of entries in array of number of least significant zero bits. */
  5058. #define SP_LNZ_CNT 16
  5059. /* Number of bits the array checks. */
  5060. #define SP_LNZ_BITS 4
  5061. /* Mask to apply to check with array. */
  5062. #define SP_LNZ_MASK 0xf
  5063. /* Number of least significant zero bits in first SP_LNZ_CNT numbers. */
  5064. static const int sp_lnz[SP_LNZ_CNT] = {
  5065. 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
  5066. };
/* Count the number of least significant zero bits.
 *
 * When a is NULL or zero, the result is 0.
 *
 * @param [in] a SP integer to use.
 *
 * @return Number of least significant zero bits.
 */
#if !defined(HAVE_ECC) || !defined(HAVE_COMP_KEY)
static
#endif /* !HAVE_ECC || HAVE_COMP_KEY */
int sp_cnt_lsb(const sp_int* a)
{
    int bc = 0;

    /* Check for number with a value. */
    if ((a != NULL) && (!sp_iszero(a))) {
        int i;
        int j;

        /* Count least significant words that are zero. */
        for (i = 0; i < a->used && a->dp[i] == 0; i++, bc += SP_WORD_SIZE) {
        }

        /* Use 4-bit table to get count for first non-zero word. */
        for (j = 0; j < SP_WORD_SIZE; j += SP_LNZ_BITS) {
            /* Get number of least significant 0 bits in nibble. */
            int cnt = sp_lnz[(a->dp[i] >> j) & SP_LNZ_MASK];
            /* Done if not all 4 bits are zero. */
            if (cnt != 4) {
                /* Add checked bits and count in last 4 bits checked. */
                bc += j + cnt;
                break;
            }
        }
    }

    return bc;
}
  5102. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH || (HAVE_ECC && FP_ECC) */
  5103. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || \
  5104. (defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_ASN))
  5105. /* Determine if the most significant byte of the encoded multi-precision number
  5106. * has the top bit set.
  5107. *
  5108. * When a is NULL, result is 0.
  5109. *
  5110. * @param [in] a SP integer.
  5111. *
  5112. * @return 1 when the top bit of top byte is set.
  5113. * @return 0 when the top bit of top byte is not set.
  5114. */
  5115. int sp_leading_bit(const sp_int* a)
  5116. {
  5117. int bit = 0;
  5118. /* Check if we have a number and value to use. */
  5119. if ((a != NULL) && (a->used > 0)) {
  5120. /* Get top word. */
  5121. sp_int_digit d = a->dp[a->used - 1];
  5122. #if SP_WORD_SIZE > 8
  5123. /* Remove bottom 8 bits until highest 8 bits left. */
  5124. while (d > (sp_int_digit)0xff) {
  5125. d >>= 8;
  5126. }
  5127. #endif
  5128. /* Get the highest bit of the 8-bit value. */
  5129. bit = (int)(d >> 7);
  5130. }
  5131. return bit;
  5132. }
  5133. #endif /* !WOLFSSL_RSA_VERIFY_ONLY */
  5134. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
  5135. defined(HAVE_ECC) || defined(WOLFSSL_KEY_GEN) || defined(OPENSSL_EXTRA) || \
  5136. !defined(NO_RSA)
  5137. /* Set one bit of a: a |= 1 << i
  5138. * The field 'used' is updated in a.
  5139. *
  5140. * @param [in,out] a SP integer to set bit into.
  5141. * @param [in] i Index of bit to set.
  5142. *
  5143. * @return MP_OKAY on success.
  5144. * @return MP_VAL when a is NULL or index is too large.
  5145. */
  5146. int sp_set_bit(sp_int* a, int i)
  5147. {
  5148. int err = MP_OKAY;
  5149. /* Get index of word to set. */
  5150. int w = (int)(i >> SP_WORD_SHIFT);
  5151. /* Check for valid number and and space for bit. */
  5152. if ((a == NULL) || (w >= a->size)) {
  5153. err = MP_VAL;
  5154. }
  5155. if (err == MP_OKAY) {
  5156. /* Amount to shift up to set bit in word. */
  5157. int s = (int)(i & (SP_WORD_SIZE - 1));
  5158. int j;
  5159. /* Set to zero all unused words up to and including word to have bit
  5160. * set.
  5161. */
  5162. for (j = a->used; j <= w; j++) {
  5163. a->dp[j] = 0;
  5164. }
  5165. /* Set bit in word. */
  5166. a->dp[w] |= (sp_int_digit)1 << s;
  5167. /* Update used if necessary */
  5168. if (a->used <= w) {
  5169. a->used = w + 1;
  5170. }
  5171. }
  5172. return err;
  5173. }
  5174. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH || HAVE_ECC ||
  5175. * WOLFSSL_KEY_GEN || OPENSSL_EXTRA || !NO_RSA */
  5176. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  5177. defined(WOLFSSL_KEY_GEN) || !defined(NO_DH)
  5178. /* Exponentiate 2 to the power of e: a = 2^e
  5179. * This is done by setting the 'e'th bit.
  5180. *
  5181. * @param [out] a SP integer to hold result.
  5182. * @param [in] e Exponent.
  5183. *
  5184. * @return MP_OKAY on success.
  5185. * @return MP_VAL when a is NULL or 2^exponent is too large.
  5186. */
  5187. int sp_2expt(sp_int* a, int e)
  5188. {
  5189. int err = MP_OKAY;
  5190. /* Validate parameters. */
  5191. if (a == NULL) {
  5192. err = MP_VAL;
  5193. }
  5194. if (err == MP_OKAY) {
  5195. /* Set number to zero and then set bit. */
  5196. _sp_zero(a);
  5197. err = sp_set_bit(a, e);
  5198. }
  5199. return err;
  5200. }
  5201. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) ||
  5202. * WOLFSSL_KEY_GEN || !NO_DH */
  5203. /**********************
  5204. * Digit/Long functions
  5205. **********************/
  5206. /* Set the multi-precision number to be the value of the digit.
  5207. *
  5208. * @param [out] a SP integer to become number.
  5209. * @param [in] d Digit to be set.
  5210. *
  5211. * @return MP_OKAY on success.
  5212. * @return MP_VAL when a is NULL.
  5213. */
  5214. int sp_set(sp_int* a, sp_int_digit d)
  5215. {
  5216. int err = MP_OKAY;
  5217. /* Validate parameters. */
  5218. if (a == NULL) {
  5219. err = MP_VAL;
  5220. }
  5221. if (err == MP_OKAY) {
  5222. /* Use sp_int_minimal to support allocated byte arrays as sp_ints. */
  5223. sp_int_minimal* am = (sp_int_minimal*)a;
  5224. am->dp[0] = d;
  5225. /* d == 0 => used = 0, d > 0 => used = 1 */
  5226. am->used = (d > 0);
  5227. #ifdef WOLFSSL_SP_INT_NEGATIVE
  5228. am->sign = MP_ZPOS;
  5229. #endif
  5230. }
  5231. return err;
  5232. }
  5233. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_RSA) || defined(OPENSSL_EXTRA)
/* Set a number into the multi-precision number.
 *
 * Number may be larger than the size of a digit.
 *
 * @param [out] a  SP integer to set.
 * @param [in]  n  Long value to set.
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when a is NULL.
 */
int sp_set_int(sp_int* a, unsigned long n)
{
    int err = MP_OKAY;

    if (a == NULL) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
    #if SP_WORD_SIZE < SP_ULONG_BITS
        /* Assign directly if value fits in one word. */
        if (n <= (sp_int_digit)SP_DIGIT_MAX) {
    #endif
            a->dp[0] = (sp_int_digit)n;
            /* Zero occupies no words; any other value occupies one. */
            a->used = (n != 0);
    #if SP_WORD_SIZE < SP_ULONG_BITS
        }
        else {
            int i;

            /* Assign value word by word, least significant first. */
            for (i = 0; (i < a->size) && (n > 0); i++,n >>= SP_WORD_SIZE) {
                a->dp[i] = (sp_int_digit)n;
            }
            /* Update number of words used. */
            a->used = i;
            /* Check for overflow - bits left over after filling all words. */
            if ((i == a->size) && (n != 0)) {
                err = MP_VAL;
            }
        }
    #endif
    #ifdef WOLFSSL_SP_INT_NEGATIVE
        /* Value comes from an unsigned long so always positive. */
        a->sign = MP_ZPOS;
    #endif
    }

    return err;
}
#endif /* WOLFSSL_SP_MATH_ALL || !NO_RSA || OPENSSL_EXTRA */
  5280. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || \
  5281. (defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_DH))
  5282. /* Compare a one digit number with a multi-precision number.
  5283. *
  5284. * When a is NULL, MP_LT is returned.
  5285. *
  5286. * @param [in] a SP integer to compare.
  5287. * @param [in] d Digit to compare with.
  5288. *
  5289. * @return MP_GT when a is greater than d.
  5290. * @return MP_LT when a is less than d.
  5291. * @return MP_EQ when a is equals d.
  5292. */
  5293. int sp_cmp_d(const sp_int* a, sp_int_digit d)
  5294. {
  5295. int ret = MP_EQ;
  5296. /* No SP integer is always less - even when d is zero. */
  5297. if (a == NULL) {
  5298. ret = MP_LT;
  5299. }
  5300. else
  5301. #ifdef WOLFSSL_SP_INT_NEGATIVE
  5302. /* Check sign first. */
  5303. if (a->sign == MP_NEG) {
  5304. ret = MP_LT;
  5305. }
  5306. else
  5307. #endif
  5308. {
  5309. /* Check if SP integer as more than one word. */
  5310. if (a->used > 1) {
  5311. ret = MP_GT;
  5312. }
  5313. /* Special case for zero. */
  5314. else if (a->used == 0) {
  5315. if (d != 0) {
  5316. ret = MP_LT;
  5317. }
  5318. /* ret initialized to equal. */
  5319. }
  5320. else {
  5321. /* The single word in the SP integer can now be compared with d. */
  5322. if (a->dp[0] > d) {
  5323. ret = MP_GT;
  5324. }
  5325. else if (a->dp[0] < d) {
  5326. ret = MP_LT;
  5327. }
  5328. /* ret initialized to equal. */
  5329. }
  5330. }
  5331. return ret;
  5332. }
  5333. #endif
  5334. #if defined(WOLFSSL_SP_ADD_D) || (defined(WOLFSSL_SP_INT_NEGATIVE) && \
  5335. defined(WOLFSSL_SP_SUB_D)) || defined(WOLFSSL_SP_READ_RADIX_10)
/* Add a one digit number to the multi-precision number.
 *
 * Handles r aliasing a (r == a).
 *
 * @param [in]  a  SP integer be added to.
 * @param [in]  d  Digit to add.
 * @param [out] r  SP integer to store result in.
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when result is too large for fixed size dp array.
 */
static int _sp_add_d(const sp_int* a, sp_int_digit d, sp_int* r)
{
    int err = MP_OKAY;

    /* Special case of zero means we want result to have a digit when not adding
     * zero. */
    if (a->used == 0) {
        r->dp[0] = d;
        r->used = d > 0;
    }
    else {
        int i = 0;
        sp_int_digit a0 = a->dp[0];

        /* Set used of result - updated if overflow seen. */
        r->used = a->used;

        r->dp[0] = a0 + d;
        /* Check for carry: unsigned wrap-around means sum is below addend. */
        if (r->dp[0] < a0) {
            /* Do carry through all words. */
            for (++i; i < a->used; i++) {
                r->dp[i] = a->dp[i] + 1;
                /* Carry stops at first word that doesn't wrap to zero. */
                if (r->dp[i] != 0) {
                    break;
                }
            }
            /* Add another word if required - carry rippled off the top. */
            if (i == a->used) {
                /* Check result has enough space for another word. */
                if (i < r->size) {
                    r->used++;
                    r->dp[i] = 1;
                }
                else {
                    err = MP_VAL;
                }
            }
        }
        /* When result is not the same as input, copy rest of digits. */
        if ((err == MP_OKAY) && (r != a)) {
            /* Copy any words that didn't update with carry. */
            for (++i; i < a->used; i++) {
                r->dp[i] = a->dp[i];
            }
        }
    }

    return err;
}
  5391. #endif /* WOLFSSL_SP_ADD_D || (WOLFSSL_SP_INT_NEGATIVE && WOLFSSL_SP_SUB_D) ||
  5392. * defined(WOLFSSL_SP_READ_RADIX_10) */
  5393. #if (defined(WOLFSSL_SP_INT_NEGATIVE) && defined(WOLFSSL_SP_ADD_D)) || \
  5394. defined(WOLFSSL_SP_SUB_D) || defined(WOLFSSL_SP_INVMOD) || \
  5395. defined(WOLFSSL_SP_INVMOD_MONT_CT) || (defined(WOLFSSL_SP_PRIME_GEN) && \
  5396. !defined(WC_NO_RNG))
/* Sub a one digit number from the multi-precision number.
 *
 * Magnitude-only subtraction: assumes a >= d or no negative support.
 * Handles r aliasing a (r == a).
 *
 * @param [in]  a  SP integer be subtracted from.
 * @param [in]  d  Digit to subtract.
 * @param [out] r  SP integer to store result in.
 */
static void _sp_sub_d(const sp_int* a, sp_int_digit d, sp_int* r)
{
    /* Set result used to be same as input. Updated with clamp. */
    r->used = a->used;
    /* Only possible when not handling negatives. */
    if (a->used == 0) {
        /* Set result to zero as no negative support. */
        r->dp[0] = 0;
    }
    else {
        int i = 0;
        sp_int_digit a0 = a->dp[0];

        r->dp[0] = a0 - d;
        /* Check for borrow: unsigned wrap-around means difference exceeds
         * minuend. */
        if (r->dp[0] > a0) {
            /* Do borrow through all words. */
            for (++i; i < a->used; i++) {
                r->dp[i] = a->dp[i] - 1;
                /* Borrow stops at first word that didn't wrap to all ones. */
                if (r->dp[i] != SP_DIGIT_MAX) {
                    break;
                }
            }
        }
        /* When result is not the same as input, copy rest of digits. */
        if (r != a) {
            /* Copy any words that didn't update with borrow. */
            for (++i; i < a->used; i++) {
                r->dp[i] = a->dp[i];
            }
        }
        /* Remove leading zero words. */
        sp_clamp(r);
    }
}
  5437. #endif /* (WOLFSSL_SP_INT_NEGATIVE && WOLFSSL_SP_ADD_D) || WOLFSSL_SP_SUB_D
  5438. * WOLFSSL_SP_INVMOD || WOLFSSL_SP_INVMOD_MONT_CT ||
  5439. * WOLFSSL_SP_PRIME_GEN */
  5440. #ifdef WOLFSSL_SP_ADD_D
/* Add a one digit number to the multi-precision number.
 *
 * @param [in]  a  SP integer be added to.
 * @param [in]  d  Digit to add.
 * @param [out] r  SP integer to store result in.
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when result is too large for fixed size dp array.
 */
int sp_add_d(const sp_int* a, sp_int_digit d, sp_int* r)
{
    int err = MP_OKAY;

    /* Check validity of parameters. */
    if ((a == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

#ifndef WOLFSSL_SP_INT_NEGATIVE
    /* Check for space in result especially when carry adds a new word. */
    if ((err == MP_OKAY) && (a->used + 1 > r->size)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* Positive only so just use internal function. */
        err = _sp_add_d(a, d, r);
    }
#else
    /* Check for space in result especially when carry adds a new word. */
    if ((err == MP_OKAY) && (a->sign == MP_ZPOS) && (a->used + 1 > r->size)) {
        err = MP_VAL;
    }
    /* Check for space in result - no carry but borrow possible. */
    if ((err == MP_OKAY) && (a->sign == MP_NEG) && (a->used > r->size)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        if (a->sign == MP_ZPOS) {
            /* Positive, so use internal function. */
            r->sign = MP_ZPOS;
            err = _sp_add_d(a, d, r);
        }
        else if ((a->used > 1) || (a->dp[0] > d)) {
            /* Negative value with magnitude bigger than digit: result stays
             * negative, magnitude shrinks by d. */
            r->sign = MP_NEG;
            _sp_sub_d(a, d, r);
        }
        else {
            /* Negative value with magnitude smaller than or equal to digit. */
            r->sign = MP_ZPOS;
            /* Subtract negative value's magnitude from digit. */
            r->dp[0] = d - a->dp[0];
            /* Result is a single digit; zero when magnitudes were equal. */
            r->used = (r->dp[0] > 0);
        }
    }
#endif

    return err;
}
  5498. #endif /* WOLFSSL_SP_ADD_D */
  5499. #ifdef WOLFSSL_SP_SUB_D
/* Sub a one digit number from the multi-precision number.
 *
 * @param [in]  a  SP integer be subtracted from.
 * @param [in]  d  Digit to subtract.
 * @param [out] r  SP integer to store result in.
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when a or r is NULL.
 */
int sp_sub_d(const sp_int* a, sp_int_digit d, sp_int* r)
{
    int err = MP_OKAY;

    /* Check validity of parameters. */
    if ((a == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

#ifndef WOLFSSL_SP_INT_NEGATIVE
    /* Check for space in result. */
    if ((err == MP_OKAY) && (a->used > r->size)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* Positive only so just use internal function. */
        _sp_sub_d(a, d, r);
    }
#else
    /* Check for space in result especially when borrow adds a new word. */
    if ((err == MP_OKAY) && (a->sign == MP_NEG) && (a->used + 1 > r->size)) {
        err = MP_VAL;
    }
    /* Check for space in result - no carry but borrow possible. */
    if ((err == MP_OKAY) && (a->sign == MP_ZPOS) && (a->used > r->size)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        if (a->sign == MP_NEG) {
            /* Subtracting from negative grows the magnitude: use internal
             * add. */
            r->sign = MP_NEG;
            err = _sp_add_d(a, d, r);
        }
        else if ((a->used > 1) || (a->dp[0] >= d)) {
            /* Positive number greater than or equal to digit - subtract digit.
             */
            r->sign = MP_ZPOS;
            _sp_sub_d(a, d, r);
        }
        else {
            /* Positive value smaller than digit: result goes negative. */
            r->sign = MP_NEG;
            /* Subtract positive value from digit. */
            r->dp[0] = d - a->dp[0];
            /* Result is a single digit strictly greater than zero. */
            r->used = 1;
        }
    }
#endif

    return err;
}
  5558. #endif /* WOLFSSL_SP_SUB_D */
  5559. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  5560. defined(WOLFSSL_SP_SMALL) && (defined(WOLFSSL_SP_MATH_ALL) || \
  5561. !defined(NO_DH) || defined(HAVE_ECC) || \
  5562. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  5563. !defined(WOLFSSL_RSA_PUBLIC_ONLY))) || \
  5564. (defined(WOLFSSL_KEY_GEN) && !defined(NO_RSA))
/* Multiply a by digit d and put result into r shifting up o digits.
 * r = (a * d) << (o * SP_WORD_SIZE)
 *
 * @param [in]  a  SP integer to be multiplied.
 * @param [in]  d  SP digit to multiply by.
 * @param [out] r  SP integer result.
 * @param [in]  o  Number of digits to move result up by.
 * @return MP_OKAY on success.
 * @return MP_VAL when result is too large for sp_int.
 */
static int _sp_mul_d(const sp_int* a, sp_int_digit d, sp_int* r, int o)
{
    int err = MP_OKAY;
    int i;
#ifndef SQR_MUL_ASM
    /* Double-width accumulator carries product high half between words. */
    sp_int_word t = 0;
#else
    /* Low and high halves of the running product. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
#endif

#ifdef WOLFSSL_SP_SMALL
    /* Zero out offset words. */
    for (i = 0; i < o; i++) {
        r->dp[i] = 0;
    }
#else
    /* Don't use the offset. Only when doing small code size div. */
    (void)o;
#endif

    /* Multiply each word of a by d. */
    for (i = 0; i < a->used; i++, o++) {
#ifndef SQR_MUL_ASM
        /* Add product to top word of previous result. */
        t += (sp_int_word)a->dp[i] * d;
        /* Store low word. */
        r->dp[o] = (sp_int_digit)t;
        /* Move top word down. */
        t >>= SP_WORD_SIZE;
#else
        /* Multiply and add into low and high from previous result.
         * No overflow possible with add. */
        SP_ASM_MUL_ADD_NO(l, h, a->dp[i], d);
        /* Store low word. */
        r->dp[o] = l;
        /* Move high word into low word and set high word to 0. */
        l = h;
        h = 0;
#endif
    }

    /* Check whether new word to be appended to result. */
#ifndef SQR_MUL_ASM
    if (t > 0)
#else
    if (l > 0)
#endif
    {
        /* Validate space available in result. */
        if (o == r->size) {
            err = MP_VAL;
        }
        else {
            /* Store new top word. */
#ifndef SQR_MUL_ASM
            r->dp[o++] = (sp_int_digit)t;
#else
            r->dp[o++] = l;
#endif
        }
    }
    /* Update number of words in result. */
    r->used = o;
    /* In case d is zero. */
    sp_clamp(r);

    return err;
}
  5640. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) ||
  5641. * WOLFSSL_SP_SMALL || (WOLFSSL_KEY_GEN && !NO_RSA) */
  5642. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  5643. (defined(WOLFSSL_KEY_GEN) && !defined(NO_RSA))
  5644. /* Multiply a by digit n and put result into r. r = a * n
  5645. *
  5646. * @param [in] a SP integer to multiply.
  5647. * @param [in] n Digit to multiply by.
  5648. * @param [out] r SP integer to hold result.
  5649. *
  5650. * @return MP_OKAY on success.
  5651. * @return MP_VAL when a or b is NULL, or a has maximum number of digits used.
  5652. */
  5653. int sp_mul_d(const sp_int* a, sp_int_digit d, sp_int* r)
  5654. {
  5655. int err = MP_OKAY;
  5656. /* Validate parameters. */
  5657. if ((a == NULL) || (r == NULL)) {
  5658. err = MP_VAL;
  5659. }
  5660. /* Check space for product result - _sp_mul_d checks when new word added. */
  5661. if ((err == MP_OKAY) && (a->used > r->size)) {
  5662. err = MP_VAL;
  5663. }
  5664. if (err == MP_OKAY) {
  5665. err = _sp_mul_d(a, d, r, 0);
  5666. #ifdef WOLFSSL_SP_INT_NEGATIVE
  5667. /* Update sign. */
  5668. if (d == 0) {
  5669. r->sign = MP_ZPOS;
  5670. }
  5671. else {
  5672. r->sign = a->sign;
  5673. }
  5674. #endif
  5675. }
  5676. return err;
  5677. }
  5678. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) ||
  5679. * (WOLFSSL_KEY_GEN && !NO_RSA) */
  5680. /* Predefine complicated rules of when to compile in sp_div_d and sp_mod_d. */
  5681. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  5682. defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) || \
  5683. defined(OPENSSL_EXTRA) || defined(WC_MP_TO_RADIX)
  5684. #define WOLFSSL_SP_DIV_D
  5685. #endif
  5686. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  5687. !defined(NO_DH) || \
  5688. (defined(HAVE_ECC) && (defined(FP_ECC) || defined(HAVE_COMP_KEY))) || \
  5689. (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN))
  5690. #define WOLFSSL_SP_MOD_D
  5691. #endif
  5692. #if (defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  5693. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  5694. !defined(WOLFSSL_RSA_PUBLIC_ONLY))) || \
  5695. defined(WOLFSSL_SP_DIV_D) || defined(WOLFSSL_SP_MOD_D)
  5696. #ifndef SP_ASM_DIV_WORD
/* Divide a two digit number by a digit number and return. (hi | lo) / d
 *
 * NOTE(review): assumes the quotient fits in a single digit (hi < d) -
 * standard precondition for this kind of helper; confirm against callers.
 *
 * @param [in] hi  SP integer digit. High digit of the dividend.
 * @param [in] lo  SP integer digit. Lower digit of the dividend.
 * @param [in] d   SP integer digit. Number to divide by.
 * @return The division result.
 */
static WC_INLINE sp_int_digit sp_div_word(sp_int_digit hi, sp_int_digit lo,
                                          sp_int_digit d)
{
#ifdef WOLFSSL_SP_DIV_WORD_HALF
    sp_int_digit r;

    /* Trial division using half of the bits in d. */

    /* Check for shortcut when no high word set. */
    if (hi == 0) {
        r = lo / d;
    }
    else {
        /* Half the bits of d. */
        sp_int_digit divh = d >> SP_HALF_SIZE;
        /* Number to divide in one value. */
        sp_int_word w = ((sp_int_word)hi << SP_WORD_SIZE) | lo;
        sp_int_word trial;
        sp_int_digit r2;

        /* Calculation for top SP_WORD_SIZE / 2 bits of dividend. */
        /* Divide high word by top half of divisor. */
        r = hi / divh;
        /* When result too big then assume only max value. */
        if (r > SP_HALF_MAX) {
            r = SP_HALF_MAX;
        }
        /* Shift up result for trial division calculation. */
        r <<= SP_HALF_SIZE;
        /* Calculate trial value. */
        trial = r * (sp_int_word)d;
        /* Decrease r while trial is too big - estimate may overshoot. */
        while (trial > w) {
            r -= (sp_int_digit)1 << SP_HALF_SIZE;
            trial -= (sp_int_word)d << SP_HALF_SIZE;
        }
        /* Subtract trial. */
        w -= trial;

        /* Calculation for remaining second SP_WORD_SIZE / 2 bits. */
        /* Divide top SP_WORD_SIZE of remainder by top half of divisor. */
        r2 = ((sp_int_digit)(w >> SP_HALF_SIZE)) / divh;
        /* Calculate trial value. */
        trial = r2 * (sp_int_word)d;
        /* Decrease r while trial is too big. */
        while (trial > w) {
            r2--;
            trial -= d;
        }
        /* Subtract trial. */
        w -= trial;
        /* Update result. */
        r += r2;

        /* Calculation for remaining bottom SP_WORD_SIZE bits. */
        r2 = ((sp_int_digit)w) / d;
        /* Update result. */
        r += r2;
    }

    return r;
#else
    sp_int_word w;
    sp_int_digit r;

    /* Use built-in double-width divide. */
    w = ((sp_int_word)hi << SP_WORD_SIZE) | lo;
    w /= d;
    r = (sp_int_digit)w;

    return r;
#endif /* WOLFSSL_SP_DIV_WORD_HALF */
}
  5769. #endif /* !SP_ASM_DIV_WORD */
  5770. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC ||
  5771. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  5772. #if (defined(WOLFSSL_SP_DIV_D) || defined(WOLFSSL_SP_MOD_D)) && \
  5773. !defined(WOLFSSL_SP_SMALL)
  5774. #if SP_WORD_SIZE == 64
  5775. /* 2^64 / 3 */
  5776. #define SP_DIV_3_CONST 0x5555555555555555L
  5777. /* 2^64 / 10 */
  5778. #define SP_DIV_10_CONST 0x1999999999999999L
  5779. #elif SP_WORD_SIZE == 32
  5780. /* 2^32 / 3 */
  5781. #define SP_DIV_3_CONST 0x55555555
  5782. /* 2^32 / 10 */
  5783. #define SP_DIV_10_CONST 0x19999999
  5784. #elif SP_WORD_SIZE == 16
  5785. /* 2^16 / 3 */
  5786. #define SP_DIV_3_CONST 0x5555
  5787. /* 2^16 / 10 */
  5788. #define SP_DIV_10_CONST 0x1999
  5789. #elif SP_WORD_SIZE == 8
  5790. /* 2^8 / 3 */
  5791. #define SP_DIV_3_CONST 0x55
  5792. /* 2^8 / 10 */
  5793. #define SP_DIV_10_CONST 0x19
  5794. #endif
  5795. #if !defined(WOLFSSL_SP_SMALL) && (SP_WORD_SIZE < 64)
/* Divide by 3: r = a / 3 and rem = a % 3
 *
 * Used in checking prime: (a % 3) == 0?
 *
 * Uses multiplication by the precomputed reciprocal SP_DIV_3_CONST
 * instead of hardware division.
 *
 * @param [in]  a    SP integer to be divided.
 * @param [out] r    SP integer that is the quotient. May be NULL.
 * @param [out] rem  Digit that is the remainder. May be NULL.
 */
static void _sp_div_3(const sp_int* a, sp_int* r, sp_int_digit* rem)
{
    int i;
#ifndef SQR_MUL_ASM
    sp_int_word t;
    sp_int_digit tt;
#else
    sp_int_digit l = 0;
    sp_int_digit tt = 0;
    sp_int_digit t = SP_DIV_3_CONST;
#endif
    sp_int_digit tr = 0;
    /* Quotient fixup. */
    static const unsigned char sp_r6[6] = { 0, 0, 0, 1, 1, 1 };
    /* Remainder fixup. */
    static const unsigned char sp_rem6[6] = { 0, 1, 2, 0, 1, 2 };

    /* Check whether only mod value needed. */
    if (r == NULL) {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying by (2^SP_WORD_SIZE) / 3. */
            tt = (t * SP_DIV_3_CONST) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - (sp_int_word)tt * 3);
#else
            /* Multiply digit by (2^SP_WORD_SIZE) / 3. */
            SP_ASM_MUL(l, tt, a->dp[i], t);
            /* Add remainder multiplied by (2^SP_WORD_SIZE) / 3 to top digit. */
            tt += tr * SP_DIV_3_CONST;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * 3);
#endif
            /* tr is 0..5 but need 0..2 */
            /* Fix up remainder. */
            tr = sp_rem6[tr];
        }
        *rem = tr;
    }
    /* At least result needed - remainder is calculated anyway. */
    else {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying by (2^SP_WORD_SIZE) / 3. */
            tt = (t * SP_DIV_3_CONST) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - (sp_int_word)tt * 3);
#else
            /* Multiply digit by (2^SP_WORD_SIZE) / 3. */
            SP_ASM_MUL(l, tt, a->dp[i], t);
            /* Add remainder multiplied by (2^SP_WORD_SIZE) / 3 to top digit. */
            tt += tr * SP_DIV_3_CONST;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * 3);
#endif
            /* tr is 0..5 but need 0..2 */
            /* Fix up result. */
            tt += sp_r6[tr];
            /* Fix up remainder. */
            tr = sp_rem6[tr];
            /* Store result of digit divided by 3. */
            r->dp[i] = tt;
        }
        /* Set the used amount to maximal amount. */
        r->used = a->used;
        /* Remove leading zeros. */
        sp_clamp(r);
        /* Return remainder if required. */
        if (rem != NULL) {
            *rem = tr;
        }
    }
}
  5882. #endif /* !(WOLFSSL_SP_SMALL && (SP_WORD_SIZE < 64) */
/* Divide by 10: r = a / 10 and rem = a % 10
 *
 * Used when writing with a radix of 10 - decimal number.
 *
 * Uses multiplication by the precomputed reciprocal SP_DIV_10_CONST
 * instead of hardware division.
 *
 * @param [in]  a    SP integer to be divided.
 * @param [out] r    SP integer that is the quotient. May be NULL.
 * @param [out] rem  Digit that is the remainder. May be NULL.
 */
static void _sp_div_10(const sp_int* a, sp_int* r, sp_int_digit* rem)
{
    int i;
#ifndef SQR_MUL_ASM
    sp_int_word t;
    sp_int_digit tt;
#else
    sp_int_digit l = 0;
    sp_int_digit tt = 0;
    sp_int_digit t = SP_DIV_10_CONST;
#endif
    sp_int_digit tr = 0;

    /* Check whether only mod value needed. */
    if (r == NULL) {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying by (2^SP_WORD_SIZE) / 10. */
            tt = (t * SP_DIV_10_CONST) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - (sp_int_word)tt * 10);
#else
            /* Multiply digit by (2^SP_WORD_SIZE) / 10. */
            SP_ASM_MUL(l, tt, a->dp[i], t);
            /* Add remainder multiplied by (2^SP_WORD_SIZE) / 10 to top digit.
             */
            tt += tr * SP_DIV_10_CONST;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * 10);
#endif
            /* tr is 0..99 but need 0..9 */
            /* Fix up remainder. */
            tr = tr % 10;
        }
        *rem = tr;
    }
    /* At least result needed - remainder is calculated anyway. */
    else {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying by (2^SP_WORD_SIZE) / 10. */
            tt = (t * SP_DIV_10_CONST) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - (sp_int_word)tt * 10);
#else
            /* Multiply digit by (2^SP_WORD_SIZE) / 10. */
            SP_ASM_MUL(l, tt, a->dp[i], t);
            /* Add remainder multiplied by (2^SP_WORD_SIZE) / 10 to top digit.
             */
            tt += tr * SP_DIV_10_CONST;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * 10);
#endif
            /* tr is 0..99 but need 0..9 */
            /* Fix up result. */
            tt += tr / 10;
            /* Fix up remainder. */
            tr %= 10;
            /* Store result of digit divided by 10. */
            r->dp[i] = tt;
        }
        /* Set the used amount to maximal amount. */
        r->used = a->used;
        /* Remove leading zeros. */
        sp_clamp(r);
        /* Return remainder if required. */
        if (rem != NULL) {
            *rem = tr;
        }
    }
}
  5967. #endif /* (WOLFSSL_SP_DIV_D || WOLFSSL_SP_MOD_D) && !WOLFSSL_SP_SMALL */
  5968. #if defined(WOLFSSL_SP_DIV_D) || defined(WOLFSSL_SP_MOD_D)
/* Divide by small number: r = a / d and rem = a % d
 *
 * Uses multiplication by an approximate reciprocal (SP_DIGIT_MAX / d)
 * instead of hardware division on each word.
 *
 * @param [in]  a    SP integer to be divided.
 * @param [in]  d    Digit to divide by.
 * @param [out] r    SP integer that is the quotient. May be NULL.
 * @param [out] rem  Digit that is the remainder. May be NULL.
 */
static void _sp_div_small(const sp_int* a, sp_int_digit d, sp_int* r,
                          sp_int_digit* rem)
{
    int i;
#ifndef SQR_MUL_ASM
    sp_int_word t;
    sp_int_digit tt;
#else
    sp_int_digit l = 0;
    sp_int_digit tt = 0;
#endif
    sp_int_digit tr = 0;
    /* Approximate reciprocal: (2^SP_WORD_SIZE - 1) / d. */
    sp_int_digit m = SP_DIGIT_MAX / d;

#ifndef WOLFSSL_SP_SMALL
    /* Check whether only mod value needed. */
    if (r == NULL) {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying. */
            tt = (t * m) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - tt * d);
#else
            /* Multiply digit. */
            SP_ASM_MUL(l, tt, a->dp[i], m);
            /* Add multiplied remainder to top digit. */
            tt += tr * m;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * d);
#endif
            /* tr < d * d */
            /* Fix up remainder. */
            tr = tr % d;
        }
        *rem = tr;
    }
    /* At least result needed - remainder is calculated anyway. */
    else
#endif /* !WOLFSSL_SP_SMALL */
    {
        /* Divide starting at most significant word down to least. */
        for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
            /* Combine remainder from last operation with this word. */
            t = ((sp_int_word)tr << SP_WORD_SIZE) | a->dp[i];
            /* Get top digit after multiplying. */
            tt = (t * m) >> SP_WORD_SIZE;
            /* Subtract trial division. */
            tr = (sp_int_digit)(t - tt * d);
#else
            /* Multiply digit. */
            SP_ASM_MUL(l, tt, a->dp[i], m);
            /* Add multiplied remainder to top digit. */
            tt += tr * m;
            /* Subtract trial division from digit. */
            tr = a->dp[i] - (tt * d);
#endif
            /* tr < d * d */
            /* Fix up result. */
            tt += tr / d;
            /* Fix up remainder. */
            tr %= d;
            /* Store result of dividing the digit. */
#ifdef WOLFSSL_SP_SMALL
            if (r != NULL)
#endif
            {
                r->dp[i] = tt;
            }
        }
#ifdef WOLFSSL_SP_SMALL
        if (r != NULL)
#endif
        {
            /* Set the used amount to maximal amount. */
            r->used = a->used;
            /* Remove leading zeros. */
            sp_clamp(r);
        }
        /* Return remainder if required. */
        if (rem != NULL) {
            *rem = tr;
        }
    }
}
  6064. #endif
  6065. #ifdef WOLFSSL_SP_DIV_D
/* Divide a multi-precision number by a digit size number and calculate
 * remainder.
 * r = a / d; rem = a % d
 *
 * Use trial division algorithm: one sp_div_word() (double-digit / digit)
 * per word, carrying the remainder into the next (less significant) word.
 *
 * @param  [in]   a    SP integer to be divided.
 * @param  [in]   d    Digit to divide by.
 * @param  [out]  r    SP integer that is the quotient. May be NULL.
 * @param  [out]  rem  Digit that is the remainder. May be NULL.
 */
static void _sp_div_d(const sp_int* a, sp_int_digit d, sp_int* r,
    sp_int_digit* rem)
{
    int i;
#ifndef SQR_MUL_ASM
    /* Double-width value: high part is carried remainder. */
    sp_int_word w = 0;
#else
    sp_int_digit l;
    /* Remainder carried between words. */
    sp_int_digit h = 0;
#endif
    /* Quotient digit. */
    sp_int_digit t;

    /* Divide starting at most significant word down to least. */
    for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
        /* Combine remainder from last operation with this word and divide. */
        t = sp_div_word((sp_int_digit)w, a->dp[i], d);
        /* Combine remainder from last operation with this word. */
        w = (w << SP_WORD_SIZE) | a->dp[i];
        /* Subtract to get modulo result. */
        w -= (sp_int_word)t * d;
#else
        /* Get current word. */
        l = a->dp[i];
        /* Combine remainder from last operation with this word and divide. */
        t = sp_div_word(h, l, d);
        /* Subtract to get modulo result. */
        h = l - t * d;
#endif
        /* Store result of dividing the digit. */
        if (r != NULL) {
            r->dp[i] = t;
        }
    }
    if (r != NULL) {
        /* Set the used amount to maximal amount. */
        r->used = a->used;
        /* Remove leading zeros. */
        sp_clamp(r);
    }
    /* Return remainder if required. */
    if (rem != NULL) {
#ifndef SQR_MUL_ASM
        *rem = (sp_int_digit)w;
#else
        *rem = h;
#endif
    }
}
/* Divide a multi-precision number by a digit size number and calculate
 * remainder.
 * r = a / d; rem = a % d
 *
 * Dispatches to the fastest applicable implementation: dedicated code for
 * divisors 3 and 10, reciprocal-based division for divisors up to
 * SP_HALF_MAX, and full trial division otherwise.
 *
 * @param  [in]   a    SP integer to be divided.
 * @param  [in]   d    Digit to divide by.
 * @param  [out]  r    SP integer that is the quotient. May be NULL.
 * @param  [out]  rem  Digit that is the remainder. May be NULL.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a is NULL or d is 0.
 */
int sp_div_d(const sp_int* a, sp_int_digit d, sp_int* r, sp_int_digit* rem)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (d == 0)) {
        err = MP_VAL;
    }
    /* Check space for maximal sized result. */
    if ((err == MP_OKAY) && (r != NULL) && (a->used > r->size)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
#if !defined(WOLFSSL_SP_SMALL)
#if SP_WORD_SIZE < 64
        if (d == 3) {
            /* Fast implementation for divisor of 3. */
            _sp_div_3(a, r, rem);
        }
        else
#endif
        if (d == 10) {
            /* Fast implementation for divisor of 10 - sp_todecimal(). */
            _sp_div_10(a, r, rem);
        }
        else
#endif
        if (d <= SP_HALF_MAX) {
            /* For small divisors. */
            _sp_div_small(a, d, r, rem);
        }
        else
        {
            /* General case - trial division. */
            _sp_div_d(a, d, r, rem);
        }

#ifdef WOLFSSL_SP_INT_NEGATIVE
        /* Quotient takes the sign of the dividend. */
        if (r != NULL) {
            r->sign = a->sign;
        }
#endif
    }

    return err;
}
  6179. #endif /* WOLFSSL_SP_DIV_D */
  6180. #ifdef WOLFSSL_SP_MOD_D
/* Calculate a modulo the digit d into r: r = a mod d
 *
 * Trial division: one sp_div_word() per word, keeping only the running
 * remainder. Used for divisors larger than SP_HALF_MAX.
 *
 * @param  [in]   a  SP integer to reduce.
 * @param  [in]   d  Digit that is the modulus.
 * @param  [out]  r  Digit that is the result.
 */
static void _sp_mod_d(const sp_int* a, const sp_int_digit d, sp_int_digit* r)
{
    int i;
#ifndef SQR_MUL_ASM
    /* Double-width value: high part is carried remainder. */
    sp_int_word w = 0;
#else
    /* Remainder carried between words. */
    sp_int_digit h = 0;
#endif

    /* Divide starting at most significant word down to least. */
    for (i = a->used - 1; i >= 0; i--) {
#ifndef SQR_MUL_ASM
        /* Combine remainder from last operation with this word and divide. */
        sp_int_digit t = sp_div_word((sp_int_digit)w, a->dp[i], d);
        /* Combine remainder from last operation with this word. */
        w = (w << SP_WORD_SIZE) | a->dp[i];
        /* Subtract to get modulo result. */
        w -= (sp_int_word)t * d;
#else
        /* Combine remainder from last operation with this word and divide. */
        sp_int_digit t = sp_div_word(h, a->dp[i], d);
        /* Subtract to get modulo result. */
        h = a->dp[i] - t * d;
#endif
    }
    /* Return remainder. */
#ifndef SQR_MUL_ASM
    *r = (sp_int_digit)w;
#else
    *r = h;
#endif
}
  6218. /* Calculate a modulo the digit d into r: r = a mod d
  6219. *
  6220. * @param [in] a SP integer to reduce.
  6221. * @param [in] d Digit to that is the modulus.
  6222. * @param [out] r Digit that is the result.
  6223. *
  6224. * @return MP_OKAY on success.
  6225. * @return MP_VAL when a is NULL or d is 0.
  6226. */
  6227. #if !defined(WOLFSSL_SP_MATH_ALL) && (!defined(HAVE_ECC) || \
  6228. !defined(HAVE_COMP_KEY)) && !defined(OPENSSL_EXTRA)
  6229. static
  6230. #endif /* !WOLFSSL_SP_MATH_ALL && (!HAVE_ECC || !HAVE_COMP_KEY) */
  6231. int sp_mod_d(const sp_int* a, sp_int_digit d, sp_int_digit* r)
  6232. {
  6233. int err = MP_OKAY;
  6234. /* Validate parameters. */
  6235. if ((a == NULL) || (r == NULL) || (d == 0)) {
  6236. err = MP_VAL;
  6237. }
  6238. #if 0
  6239. sp_print(a, "a");
  6240. sp_print_digit(d, "m");
  6241. #endif
  6242. if (err == MP_OKAY) {
  6243. /* Check whether d is a power of 2. */
  6244. if ((d & (d - 1)) == 0) {
  6245. if (a->used == 0) {
  6246. *r = 0;
  6247. }
  6248. else {
  6249. *r = a->dp[0] & (d - 1);
  6250. }
  6251. }
  6252. #if !defined(WOLFSSL_SP_SMALL)
  6253. #if SP_WORD_SIZE < 64
  6254. else if (d == 3) {
  6255. /* Fast implementation for divisor of 3. */
  6256. _sp_div_3(a, NULL, r);
  6257. }
  6258. #endif
  6259. else if (d == 10) {
  6260. /* Fast implementation for divisor of 10. */
  6261. _sp_div_10(a, NULL, r);
  6262. }
  6263. #endif
  6264. else if (d <= SP_HALF_MAX) {
  6265. /* For small divisors. */
  6266. _sp_div_small(a, d, NULL, r);
  6267. }
  6268. else {
  6269. _sp_mod_d(a, d, r);
  6270. }
  6271. #ifdef WOLFSSL_SP_INT_NEGATIVE
  6272. if (a->sign == MP_NEG) {
  6273. *r = d - *r;
  6274. }
  6275. #endif
  6276. }
  6277. #if 0
  6278. sp_print_digit(*r, "rmod");
  6279. #endif
  6280. return err;
  6281. }
  6282. #endif /* WOLFSSL_SP_MOD_D */
  6283. #if defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC)
/* Divides a by 2 mod m and stores in r: r = (a / 2) mod m
 *
 * r = a / 2 (mod m) - constant time (a < m and positive)
 *
 * If a is odd, a + m is even and (a + m) / 2 is the answer; if a is even,
 * a / 2 is. The modulus is added under a mask so both cases execute the
 * same instructions.
 * NOTE(review): a->dp[0] is read without checking a->used - assumes a is
 * normalized with at least dp[0] valid.
 *
 * @param  [in]   a  SP integer to divide.
 * @param  [in]   m  SP integer that is modulus.
 * @param  [out]  r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, m or r is NULL.
 */
int sp_div_2_mod_ct(const sp_int* a, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Check result has enough space for a + m (one extra carry digit). */
    if ((err == MP_OKAY) && (m->used + 1 > r->size)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
#ifndef SQR_MUL_ASM
        sp_int_word w = 0;
#else
        sp_int_digit l = 0;
        sp_int_digit h;
        sp_int_digit t;
#endif
        /* Mask to apply to modulus: all ones when a is odd, zero when even. */
        sp_int_digit mask = (sp_int_digit)0 - (a->dp[0] & 1);
        int i;

#if 0
        sp_print(a, "a");
        sp_print(m, "m");
#endif

        /* Add a to m, if a is odd, into r in constant time. */
        for (i = 0; i < m->used; i++) {
            /* Mask to apply to a - set when used value at index. */
            sp_int_digit mask_a = (sp_int_digit)0 - (i < a->used);
#ifndef SQR_MUL_ASM
            /* Conditionally add modulus. */
            w += m->dp[i] & mask;
            /* Conditionally add a. */
            w += a->dp[i] & mask_a;
            /* Store low digit in result. */
            r->dp[i] = (sp_int_digit)w;
            /* Move high digit down. */
            w >>= DIGIT_BIT;
#else
            /* No high digit. */
            h = 0;
            /* Conditionally use modulus. */
            t = m->dp[i] & mask;
            /* Add with carry modulus. */
            SP_ASM_ADDC_REG(l, h, t);
            /* Conditionally use a. */
            t = a->dp[i] & mask_a;
            /* Add with carry a. */
            SP_ASM_ADDC_REG(l, h, t);
            /* Store low digit in result. */
            r->dp[i] = l;
            /* Move high digit down. */
            l = h;
#endif
        }
        /* Store carry - written at index m->used, hence the size check. */
#ifndef SQR_MUL_ASM
        r->dp[i] = (sp_int_digit)w;
#else
        r->dp[i] = l;
#endif
        /* Used includes carry - set or not. */
        r->used = i + 1;
#ifdef WOLFSSL_SP_INT_NEGATIVE
        r->sign = MP_ZPOS;
#endif
        /* Divide conditional sum by 2 - even by construction. */
        sp_div_2(r, r);

#if 0
        sp_print(r, "rd2");
#endif
    }

    return err;
}
  6370. #endif /* WOLFSSL_SP_MATH_ALL && HAVE_ECC */
  6371. #if defined(HAVE_ECC) || !defined(NO_DSA) || defined(OPENSSL_EXTRA) || \
  6372. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  6373. !defined(WOLFSSL_RSA_PUBLIC_ONLY))
  6374. /* Divides a by 2 and stores in r: r = a >> 1
  6375. *
  6376. * @param [in] a SP integer to divide.
  6377. * @param [out] r SP integer to hold result.
  6378. *
  6379. * @return MP_OKAY on success.
  6380. * @return MP_VAL when a or r is NULL.
  6381. */
  6382. #if !(defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC))
  6383. static
  6384. #endif
  6385. int sp_div_2(const sp_int* a, sp_int* r)
  6386. {
  6387. int err = MP_OKAY;
  6388. #if defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC)
  6389. /* Only when a public API. */
  6390. if ((a == NULL) || (r == NULL)) {
  6391. err = MP_VAL;
  6392. }
  6393. /* Ensure maximal size is supported by result. */
  6394. if ((err == MP_OKAY) && (a->used > r->size)) {
  6395. err = MP_VAL;
  6396. }
  6397. #endif
  6398. if (err == MP_OKAY) {
  6399. int i;
  6400. /* Shift down each word by 1 and include bottom bit of next at top. */
  6401. for (i = 0; i < a->used - 1; i++) {
  6402. r->dp[i] = (a->dp[i] >> 1) | (a->dp[i+1] << (SP_WORD_SIZE - 1));
  6403. }
  6404. /* Last word only needs to be shifted down. */
  6405. r->dp[i] = a->dp[i] >> 1;
  6406. /* Set used to be all words seen. */
  6407. r->used = i + 1;
  6408. /* Remove leading zeros. */
  6409. sp_clamp(r);
  6410. #ifdef WOLFSSL_SP_INT_NEGATIVE
  6411. /* Same sign in result. */
  6412. r->sign = a->sign;
  6413. #endif
  6414. }
  6415. return err;
  6416. }
  6417. #endif /* HAVE_ECC || !NO_DSA || OPENSSL_EXTRA ||
  6418. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  6419. /************************
  6420. * Add/Subtract Functions
  6421. ************************/
  6422. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || defined(WOLFSSL_SP_INVMOD)
/* Add offset b to a into r: r = a + (b << (o * SP_WORD_SIZEOF))
 *
 * Schoolbook addition with carry propagation. When SP_MATH_NEED_ADD_OFF is
 * not defined the offset is ignored and a plain r = a + b is computed.
 *
 * @param  [in]   a  SP integer to add to.
 * @param  [in]   b  SP integer to add.
 * @param  [out]  r  SP integer to store result in.
 * @param  [in]   o  Number of digits to offset b.
 *
 * @return  MP_OKAY on success.
 */
static int _sp_add_off(const sp_int* a, const sp_int* b, sp_int* r, int o)
{
    int i = 0;
#ifndef SQR_MUL_ASM
    /* Double-width accumulator: low half is digit, high half is carry. */
    sp_int_word t = 0;
#else
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit t = 0;
#endif

#ifdef SP_MATH_NEED_ADD_OFF
    int j;

    /* Copy a into result up to offset. */
    for (; (i < o) && (i < a->used); i++) {
        r->dp[i] = a->dp[i];
    }
    /* Set result to 0 for digits beyond those in a. */
    for (; i < o; i++) {
        r->dp[i] = 0;
    }
    /* Add each digit from a and b where both have values. */
    for (j = 0; (i < a->used) && (j < b->used); i++, j++) {
#ifndef SQR_MUL_ASM
        t += a->dp[i];
        t += b->dp[j];
        r->dp[i] = (sp_int_digit)t;
        /* Keep only the carry. */
        t >>= SP_WORD_SIZE;
#else
        t = a->dp[i];
        SP_ASM_ADDC(l, h, t);
        t = b->dp[j];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        /* Carry becomes next low digit. */
        l = h;
        h = 0;
#endif
    }
    /* Either a and/or b are out of digits. Add carry and remaining a digits. */
    for (; i < a->used; i++) {
#ifndef SQR_MUL_ASM
        t += a->dp[i];
        r->dp[i] = (sp_int_digit)t;
        t >>= SP_WORD_SIZE;
#else
        t = a->dp[i];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        l = h;
        h = 0;
#endif
    }
    /* a is out of digits. Add carry and remaining b digits. */
    for (; j < b->used; i++, j++) {
#ifndef SQR_MUL_ASM
        t += b->dp[j];
        r->dp[i] = (sp_int_digit)t;
        t >>= SP_WORD_SIZE;
#else
        t = b->dp[j];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        l = h;
        h = 0;
#endif
    }
#else
    /* Offset unused in this configuration - plain addition. */
    (void)o;

    /* Add each digit from a and b where both have values. */
    for (; (i < a->used) && (i < b->used); i++) {
#ifndef SQR_MUL_ASM
        t += a->dp[i];
        t += b->dp[i];
        r->dp[i] = (sp_int_digit)t;
        /* Keep only the carry. */
        t >>= SP_WORD_SIZE;
#else
        t = a->dp[i];
        SP_ASM_ADDC(l, h, t);
        t = b->dp[i];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        /* Carry becomes next low digit. */
        l = h;
        h = 0;
#endif
    }
    /* Either a and/or b are out of digits. Add carry and remaining a digits. */
    for (; i < a->used; i++) {
#ifndef SQR_MUL_ASM
        t += a->dp[i];
        r->dp[i] = (sp_int_digit)t;
        t >>= SP_WORD_SIZE;
#else
        t = a->dp[i];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        l = h;
        h = 0;
#endif
    }
    /* a is out of digits. Add carry and remaining b digits. */
    for (; i < b->used; i++) {
#ifndef SQR_MUL_ASM
        t += b->dp[i];
        r->dp[i] = (sp_int_digit)t;
        t >>= SP_WORD_SIZE;
#else
        t = b->dp[i];
        SP_ASM_ADDC(l, h, t);
        r->dp[i] = l;
        l = h;
        h = 0;
#endif
    }
#endif

    /* Set used based on last digit put in. */
    r->used = i;
    /* Put in carry. */
#ifndef SQR_MUL_ASM
    r->dp[i] = (sp_int_digit)t;
    r->used += (t != 0);
#else
    r->dp[i] = l;
    r->used += (l != 0);
#endif
    /* Remove leading zeros. */
    sp_clamp(r);

    return MP_OKAY;
}
  6559. #endif /* !WOLFSSL_RSA_VERIFY_ONLY */
  6560. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_SP_INT_NEGATIVE) || \
  6561. !defined(NO_DH) || defined(HAVE_ECC) || (!defined(NO_RSA) && \
  6562. !defined(WOLFSSL_RSA_VERIFY_ONLY))
/* Sub offset b from a into r: r = a - (b << (o * SP_WORD_SIZEOF))
 * a must be greater than b.
 *
 * Schoolbook subtraction with borrow propagation. Because a >= b the final
 * borrow is guaranteed to be zero and is discarded.
 *
 * When using offset, r == a is faster (the low-digit copy is skipped).
 *
 * @param  [in]   a  SP integer to subtract from.
 * @param  [in]   b  SP integer to subtract.
 * @param  [out]  r  SP integer to store result in.
 * @param  [in]   o  Number of digits to offset b.
 *
 * @return  MP_OKAY on success.
 */
static int _sp_sub_off(const sp_int* a, const sp_int* b, sp_int* r, int o)
{
    int i = 0;
    int j;
#ifndef SQR_MUL_ASM
    /* Signed double-width accumulator - shift keeps the borrow as -1/0. */
    sp_int_sword t = 0;
#else
    sp_int_digit l = 0;
    sp_int_digit h = 0;
#endif

    /* Need to copy digits up to offset into result. */
    if (r != a) {
        for (; (i < o) && (i < a->used); i++) {
            r->dp[i] = a->dp[i];
        }
    }
    else {
        /* r aliases a - low digits are already in place. */
        i = o;
    }
    /* Index to add at is the offset now. */
    for (j = 0; (i < a->used) && (j < b->used); i++, j++) {
#ifndef SQR_MUL_ASM
        /* Add a into and subtract b from current value. */
        t += a->dp[i];
        t -= b->dp[j];
        /* Store low digit in result. */
        r->dp[i] = (sp_int_digit)t;
        /* Move high digit down. */
        t >>= SP_WORD_SIZE;
#else
        /* Add a into and subtract b from current value. */
        SP_ASM_ADDC(l, h, a->dp[i]);
        SP_ASM_SUBB(l, h, b->dp[j]);
        /* Store low digit in result. */
        r->dp[i] = l;
        /* Move high digit down. */
        l = h;
        /* High digit is 0 when positive or -1 on negative. */
        h = (sp_int_digit)0 - (h >> (SP_WORD_SIZE - 1));
#endif
    }
    /* b is out of digits - propagate borrow through remaining a digits. */
    for (; i < a->used; i++) {
#ifndef SQR_MUL_ASM
        /* Add a into current value. */
        t += a->dp[i];
        /* Store low digit in result. */
        r->dp[i] = (sp_int_digit)t;
        /* Move high digit down. */
        t >>= SP_WORD_SIZE;
#else
        /* Add a into current value. */
        SP_ASM_ADDC(l, h, a->dp[i]);
        /* Store low digit in result. */
        r->dp[i] = l;
        /* Move high digit down. */
        l = h;
        /* High digit is 0 when positive or -1 on negative. */
        h = (sp_int_digit)0 - (h >> (SP_WORD_SIZE - 1));
#endif
    }
    /* Set used based on last digit put in. */
    r->used = i;
    /* Remove leading zeros. */
    sp_clamp(r);

    return MP_OKAY;
}
  6641. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_SP_INT_NEGATIVE || !NO_DH ||
  6642. * HAVE_ECC || (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  6643. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || defined(WOLFSSL_SP_INVMOD)
  6644. /* Add b to a into r: r = a + b
  6645. *
  6646. * @param [in] a SP integer to add to.
  6647. * @param [in] b SP integer to add.
  6648. * @param [out] r SP integer to store result in.
  6649. *
  6650. * @return MP_OKAY on success.
  6651. * @return MP_VAL when a, b, or r is NULL.
  6652. */
  6653. int sp_add(const sp_int* a, const sp_int* b, sp_int* r)
  6654. {
  6655. int err = MP_OKAY;
  6656. /* Validate parameters. */
  6657. if ((a == NULL) || (b == NULL) || (r == NULL)) {
  6658. err = MP_VAL;
  6659. }
  6660. /* Check that r as big as a and b plus one word. */
  6661. if ((err == MP_OKAY) && ((a->used >= r->size) || (b->used >= r->size))) {
  6662. err = MP_VAL;
  6663. }
  6664. if (err == MP_OKAY) {
  6665. #ifndef WOLFSSL_SP_INT_NEGATIVE
  6666. /* Add two positive numbers. */
  6667. err = _sp_add_off(a, b, r, 0);
  6668. #else
  6669. /* Same sign then add absolute values and use sign. */
  6670. if (a->sign == b->sign) {
  6671. err = _sp_add_off(a, b, r, 0);
  6672. r->sign = a->sign;
  6673. }
  6674. /* Different sign and abs(a) >= abs(b). */
  6675. else if (_sp_cmp_abs(a, b) != MP_LT) {
  6676. /* Subtract absolute values and use sign of a unless result 0. */
  6677. err = _sp_sub_off(a, b, r, 0);
  6678. if (sp_iszero(r)) {
  6679. r->sign = MP_ZPOS;
  6680. }
  6681. else {
  6682. r->sign = a->sign;
  6683. }
  6684. }
  6685. /* Different sign and abs(a) < abs(b). */
  6686. else {
  6687. /* Reverse subtract absolute values and use sign of b. */
  6688. err = _sp_sub_off(b, a, r, 0);
  6689. r->sign = b->sign;
  6690. }
  6691. #endif
  6692. }
  6693. return err;
  6694. }
  6695. #endif /* !WOLFSSL_RSA_VERIFY_ONLY */
  6696. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  6697. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY))
  6698. /* Subtract b from a into r: r = a - b
  6699. *
  6700. * a must be greater than b unless WOLFSSL_SP_INT_NEGATIVE is defined.
  6701. *
  6702. * @param [in] a SP integer to subtract from.
  6703. * @param [in] b SP integer to subtract.
  6704. * @param [out] r SP integer to store result in.
  6705. *
  6706. * @return MP_OKAY on success.
  6707. * @return MP_VAL when a, b, or r is NULL.
  6708. */
  6709. int sp_sub(const sp_int* a, const sp_int* b, sp_int* r)
  6710. {
  6711. int err = MP_OKAY;
  6712. /* Validate parameters. */
  6713. if ((a == NULL) || (b == NULL) || (r == NULL)) {
  6714. err = MP_VAL;
  6715. }
  6716. /* Check that r as big as a and b plus one word. */
  6717. if ((err == MP_OKAY) && ((a->used >= r->size) || (b->used >= r->size))) {
  6718. err = MP_VAL;
  6719. }
  6720. if (err == MP_OKAY) {
  6721. #ifndef WOLFSSL_SP_INT_NEGATIVE
  6722. /* Subtract positive numbers b from a. */
  6723. err = _sp_sub_off(a, b, r, 0);
  6724. #else
  6725. /* Different sign. */
  6726. if (a->sign != b->sign) {
  6727. /* Add absolute values and use sign of a. */
  6728. err = _sp_add_off(a, b, r, 0);
  6729. r->sign = a->sign;
  6730. }
  6731. /* Same sign and abs(a) >= abs(b). */
  6732. else if (_sp_cmp_abs(a, b) != MP_LT) {
  6733. /* Subtract absolute values and use sign of a unless result 0. */
  6734. err = _sp_sub_off(a, b, r, 0);
  6735. if (sp_iszero(r)) {
  6736. r->sign = MP_ZPOS;
  6737. }
  6738. else {
  6739. r->sign = a->sign;
  6740. }
  6741. }
  6742. /* Same sign and abs(a) < abs(b). */
  6743. else {
  6744. /* Reverse subtract absolute values and use opposite sign of a */
  6745. err = _sp_sub_off(b, a, r, 0);
  6746. r->sign = 1 - a->sign;
  6747. }
  6748. #endif
  6749. }
  6750. return err;
  6751. }
  6752. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC ||
  6753. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY)*/
  6754. /****************************
  6755. * Add/Subtract mod functions
  6756. ****************************/
  6757. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  6758. (!defined(WOLFSSL_SP_MATH) && defined(WOLFSSL_CUSTOM_CURVES)) || \
  6759. defined(WOLFCRYPT_HAVE_ECCSI) || defined(WOLFCRYPT_HAVE_SAKKE)
/* Add two value and reduce: r = (a + b) % m
 *
 * Computes the sum into a temporary sized for the worst case and reduces
 * with sp_mod(). Not constant time - see sp_addmod_ct() for that.
 *
 * @param  [in]   a  SP integer to add.
 * @param  [in]   b  SP integer to add with.
 * @param  [in]   m  SP integer that is the modulus.
 * @param  [out]  r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, b, m or r is NULL.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_addmod(const sp_int* a, const sp_int* b, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
    /* Calculate used based on digits used in a and b - sum needs at most
     * one digit more than the larger operand. Computed before validation
     * because DECL_SP_INT sizes the temporary from it. */
    int used = ((a == NULL) || (b == NULL)) ? 1 :
        ((a->used >= b->used) ? a->used + 1 : b->used + 1);
    DECL_SP_INT(t, used);

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Allocate a temporary SP int to hold sum. */
    ALLOC_SP_INT_SIZE(t, used, err, NULL);

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
    }
#endif

    if (err == MP_OKAY) {
        /* Do sum. */
        err = sp_add(a, b, t);
    }
    if (err == MP_OKAY) {
        /* Mod result. */
        err = sp_mod(t, m, r);
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(r, "rma");
    }
#endif

    FREE_SP_INT(t, NULL);
    return err;
}
  6807. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_CUSTOM_CURVES) ||
  6808. * WOLFCRYPT_HAVE_ECCSI || WOLFCRYPT_HAVE_SAKKE */
  6809. #if defined(WOLFSSL_SP_MATH_ALL) && (!defined(WOLFSSL_RSA_VERIFY_ONLY) || \
  6810. defined(HAVE_ECC))
/* Sub b from a and reduce: r = (a - b) % m
 * Result is always positive.
 *
 * Without WOLFSSL_SP_INT_NEGATIVE, both operands are first reduced below m
 * and m is added when a < b so the final subtraction never underflows.
 * With WOLFSSL_SP_INT_NEGATIVE, sp_mod() handles the sign of the
 * intermediate difference.
 *
 * @param  [in]   a  SP integer to subtract from.
 * @param  [in]   b  SP integer to subtract.
 * @param  [in]   m  SP integer that is the modulus.
 * @param  [out]  r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, b, m or r is NULL.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_submod(const sp_int* a, const sp_int* b, const sp_int* m, sp_int* r)
{
#ifndef WOLFSSL_SP_INT_NEGATIVE
    int err = MP_OKAY;
    /* Temporaries sized for the largest of a, b, m plus one carry digit.
     * Computed before validation because DECL_SP_INT_ARRAY uses it. */
    int used = ((a == NULL) || (b == NULL) || (m == NULL)) ? 1 :
        ((a->used >= m->used) ?
            ((a->used >= b->used) ? (a->used + 1) : (b->used + 1)) :
        ((b->used >= m->used)) ? (b->used + 1) : (m->used + 1));
    DECL_SP_INT_ARRAY(t, used, 2);

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
    }
#endif

    ALLOC_SP_INT_ARRAY(t, used, 2, err, NULL);
    if (err == MP_OKAY) {
        /* Reduce a to less than m. */
        if (_sp_cmp(a, m) != MP_LT) {
            err = sp_mod(a, m, t[0]);
            a = t[0];
        }
    }
    if (err == MP_OKAY) {
        /* Reduce b to less than m. */
        if (_sp_cmp(b, m) != MP_LT) {
            err = sp_mod(b, m, t[1]);
            b = t[1];
        }
    }
    if (err == MP_OKAY) {
        /* Add m to a if a smaller than b - keeps the subtraction from
         * underflowing. */
        if (_sp_cmp(a, b) == MP_LT) {
            err = sp_add(a, m, t[0]);
            a = t[0];
        }
    }
    if (err == MP_OKAY) {
        /* Subtract b from a. */
        err = sp_sub(a, b, r);
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(r, "rms");
    }
#endif

    FREE_SP_INT_ARRAY(t, NULL);
    return err;
#else /* WOLFSSL_SP_INT_NEGATIVE */
    int err = MP_OKAY;
    /* Temporary sized for the larger of a, b plus one digit. Computed
     * before validation because DECL_SP_INT uses it. */
    int used = ((a == NULL) || (b == NULL)) ? 1 :
        ((a->used >= b->used) ? a->used + 1 : b->used + 1);
    DECL_SP_INT(t, used);

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
    }
#endif

    ALLOC_SP_INT_SIZE(t, used, err, NULL);
    /* Subtract b from a into temporary - may be negative. */
    if (err == MP_OKAY) {
        err = sp_sub(a, b, t);
    }
    if (err == MP_OKAY) {
        /* Reduce result mod m into result. */
        err = sp_mod(t, m, r);
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(r, "rms");
    }
#endif

    FREE_SP_INT(t, NULL);
    return err;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
}
  6910. #endif /* WOLFSSL_SP_MATH_ALL */
  6911. #if defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC)
/* Add two value and reduce: r = (a + b) % m
 *
 * r = a + b (mod m) - constant time (a < m and b < m, a, b and m are positive)
 *
 * Two fixed-length passes over m->used digits: the first computes a + b
 * and, in parallel, (a + b) - m without storing it; the second
 * conditionally subtracts m under a mask derived from the first pass's
 * sign. No data-dependent branches.
 *
 * Assumes a, b, m and r are not NULL.
 * m and r must not be the same pointer.
 *
 * @param  [in]   a  SP integer to add.
 * @param  [in]   b  SP integer to add with.
 * @param  [in]   m  SP integer that is the modulus.
 * @param  [out]  r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 */
int sp_addmod_ct(const sp_int* a, const sp_int* b, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
#ifndef SQR_MUL_ASM
    /* w accumulates the sum; s accumulates sum - m (sign tracked). */
    sp_int_sword w;
    sp_int_sword s;
#else
    sp_int_digit wl;
    sp_int_digit wh;
    sp_int_digit sl;
    sp_int_digit sh;
    sp_int_digit t;
#endif
    sp_int_digit mask;
    /* Digit masks for a and b: all ones while within used, zero after. */
    sp_int_digit mask_a = (sp_int_digit)-1;
    sp_int_digit mask_b = (sp_int_digit)-1;
    int i;

    /* Check result is as big as modulus. */
    if (m->used > r->size) {
        err = MP_VAL;
    }
    /* Validate parameters. */
    if ((err == MP_OKAY) && (r == m)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
#if 0
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
#endif

        /* Add a to b into r. Do the subtract of modulus but don't store result.
         * When subtract result is negative, the overflow will be negative.
         * Only need to subtract mod when result is positive - overflow is
         * positive.
         */
#ifndef SQR_MUL_ASM
        w = 0;
        s = 0;
#else
        wl = 0;
        sl = 0;
        sh = 0;
#endif
        /* Constant time - add modulus digits worth from a and b. */
        for (i = 0; i < m->used; i++) {
            /* Values past 'used' are not initialized - drop mask to zero
             * once the index reaches each operand's used count. */
            mask_a += (i == a->used);
            mask_b += (i == b->used);
#ifndef SQR_MUL_ASM
            /* Add next digits from a and b to current value. */
            w += a->dp[i] & mask_a;
            w += b->dp[i] & mask_b;
            /* Store low digit in result. */
            r->dp[i] = (sp_int_digit)w;
            /* Add result to reducing value. */
            s += (sp_int_digit)w;
            /* Subtract next digit of modulus. */
            s -= m->dp[i];
            /* Move high digit of reduced result down. */
            s >>= DIGIT_BIT;
            /* Move high digit of sum result down. */
            w >>= DIGIT_BIT;
#else
            wh = 0;
            /* Add next digits from a and b to current value. */
            t = a->dp[i] & mask_a;
            SP_ASM_ADDC_REG(wl, wh, t);
            t = b->dp[i] & mask_b;
            SP_ASM_ADDC_REG(wl, wh, t);
            /* Store low digit in result. */
            r->dp[i] = wl;
            /* Add result to reducing value. */
            SP_ASM_ADDC_REG(sl, sh, wl);
            /* Subtract next digit of modulus. */
            SP_ASM_SUBB(sl, sh, m->dp[i]);
            /* Move high digit of reduced result down. */
            sl = sh;
            /* High digit is 0 when positive or -1 on negative. */
            sh = (sp_int_digit)0 - (sh >> (SP_WORD_SIZE-1));
            /* Move high digit of sum result down. */
            wl = wh;
#endif
        }
#ifndef SQR_MUL_ASM
        /* Add carry into reduced result. */
        s += (sp_int_digit)w;
        /* s will be positive when subtracting modulus is needed. */
        mask = (sp_int_digit)0 - (s >= 0);
#else
        /* Add carry into reduced result. */
        SP_ASM_ADDC_REG(sl, sh, wl);
        /* s will be positive when subtracting modulus is needed. */
        mask = (sh >> (SP_WORD_SIZE-1)) - 1;
#endif

        /* Constant time, conditionally, subtract modulus from sum. */
#ifndef SQR_MUL_ASM
        w = 0;
#else
        wl = 0;
        wh = 0;
#endif
        for (i = 0; i < m->used; i++) {
#ifndef SQR_MUL_ASM
            /* Add result to current value and conditionally subtract modulus.
             */
            w += r->dp[i];
            w -= m->dp[i] & mask;
            /* Store low digit in result. */
            r->dp[i] = (sp_int_digit)w;
            /* Move high digit of sum result down. */
            w >>= DIGIT_BIT;
#else
            /* Add result to current value and conditionally subtract modulus.
             */
            SP_ASM_ADDC(wl, wh, r->dp[i]);
            t = m->dp[i] & mask;
            SP_ASM_SUBB_REG(wl, wh, t);
            /* Store low digit in result. */
            r->dp[i] = wl;
            /* Move high digit of sum result down. */
            wl = wh;
            /* High digit is 0 when positive or -1 on negative. */
            wh = (sp_int_digit)0 - (wl >> (SP_WORD_SIZE-1));
#endif
        }
        /* Result will always have digits equal to or less than those in
         * modulus. */
        r->used = i;
#ifdef WOLFSSL_SP_INT_NEGATIVE
        r->sign = MP_ZPOS;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
        /* Remove leading zeros. */
        sp_clamp(r);

#if 0
        sp_print(r, "rma");
#endif
    }

    return err;
}
  7066. #endif /* WOLFSSL_SP_MATH_ALL && HAVE_ECC */
  7067. #if defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC)
  7068. /* Sub b from a and reduce: r = (a - b) % m
  7069. * Result is always positive.
  7070. *
  7071. * r = a - b (mod m) - constant time (a < m and b < m, a, b and m are positive)
  7072. *
  7073. * Assumes a, b, m and r are not NULL.
  7074. * m and r must not be the same pointer.
  7075. *
  7076. * @param [in] a SP integer to subtract from
  7077. * @param [in] b SP integer to subtract.
  7078. * @param [in] m SP integer that is the modulus.
  7079. * @param [out] r SP integer to hold result.
  7080. *
  7081. * @return MP_OKAY on success.
  7082. */
int sp_submod_ct(const sp_int* a, const sp_int* b, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
#ifndef SQR_MUL_ASM
    /* Signed word accumulator - carries borrow between digits. */
    sp_int_sword w;
#else
    /* Low/high accumulator digits and temporary for masked digit. */
    sp_int_digit l;
    sp_int_digit h;
    sp_int_digit t;
#endif
    /* All ones when modulus must be added back, zero otherwise. */
    sp_int_digit mask;
    /* Digit masks: all ones while i < used, zero afterwards (see loop). */
    sp_int_digit mask_a = (sp_int_digit)-1;
    sp_int_digit mask_b = (sp_int_digit)-1;
    int i;

    /* Check result is as big as modulus. */
    if (m->used > r->size) {
        err = MP_VAL;
    }
    /* Validate parameters. */
    if ((err == MP_OKAY) && (r == m)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
#if 0
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
#endif

        /* In constant time, subtract b from a putting result in r. */
#ifndef SQR_MUL_ASM
        w = 0;
#else
        l = 0;
        h = 0;
#endif
        for (i = 0; i < m->used; i++) {
            /* Values past 'used' are not initialized - masks become zero
             * once i reaches the digit count so those digits read as zero
             * without branching (constant time). */
            mask_a += (i == a->used);
            mask_b += (i == b->used);
#ifndef SQR_MUL_ASM
            /* Add a to and subtract b from current value. */
            w += a->dp[i] & mask_a;
            w -= b->dp[i] & mask_b;
            /* Store low digit in result. */
            r->dp[i] = (sp_int_digit)w;
            /* Move high digit down. */
            w >>= DIGIT_BIT;
#else
            /* Add a and subtract b from current value. */
            t = a->dp[i] & mask_a;
            SP_ASM_ADDC_REG(l, h, t);
            t = b->dp[i] & mask_b;
            SP_ASM_SUBB_REG(l, h, t);
            /* Store low digit in result. */
            r->dp[i] = l;
            /* Move high digit down. */
            l = h;
            /* High digit is 0 when positive or -1 on negative. */
            h = (sp_int_digit)0 - (l >> (SP_WORD_SIZE - 1));
#endif
        }
        /* When w is negative then we need to add modulus to make result
         * positive. */
#ifndef SQR_MUL_ASM
        mask = (sp_int_digit)0 - (w < 0);
#else
        /* h is all ones when the subtraction borrowed, zero otherwise. */
        mask = h;
#endif

        /* Constant time, conditionally, add modulus to difference. */
#ifndef SQR_MUL_ASM
        w = 0;
#else
        l = 0;
#endif
        for (i = 0; i < m->used; i++) {
#ifndef SQR_MUL_ASM
            /* Add result and conditionally modulus to current value. */
            w += r->dp[i];
            w += m->dp[i] & mask;
            /* Store low digit in result. */
            r->dp[i] = (sp_int_digit)w;
            /* Move high digit down. */
            w >>= DIGIT_BIT;
#else
            h = 0;
            /* Add result and conditionally modulus to current value. */
            SP_ASM_ADDC(l, h, r->dp[i]);
            t = m->dp[i] & mask;
            SP_ASM_ADDC_REG(l, h, t);
            /* Store low digit in result. */
            r->dp[i] = l;
            /* Move high digit down. */
            l = h;
#endif
        }
        /* Result will always have digits equal to or less than those in
         * modulus. */
        r->used = i;
#ifdef WOLFSSL_SP_INT_NEGATIVE
        r->sign = MP_ZPOS;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
        /* Remove leading zeros. */
        sp_clamp(r);
#if 0
        sp_print(r, "rms");
#endif
    }

    return err;
}
  7192. #endif /* WOLFSSL_SP_MATH_ALL && HAVE_ECC */
  7193. /********************
 * Shifting functions
  7195. ********************/
  7196. #if !defined(NO_DH) || defined(HAVE_ECC) || (defined(WC_RSA_BLINDING) && \
  7197. !defined(WOLFSSL_RSA_VERIFY_ONLY))
  7198. /* Left shift the multi-precision number by a number of digits.
  7199. *
  7200. * @param [in,out] a SP integer to shift.
  7201. * @param [in] s Number of digits to shift.
  7202. *
  7203. * @return MP_OKAY on success.
  7204. * @return MP_VAL when a is NULL or the result is too big to fit in an SP.
  7205. */
  7206. int sp_lshd(sp_int* a, int s)
  7207. {
  7208. int err = MP_OKAY;
  7209. /* Validate parameters. */
  7210. if (a == NULL) {
  7211. err = MP_VAL;
  7212. }
  7213. /* Ensure number has enough digits for operation. */
  7214. if ((err == MP_OKAY) && (a->used + s > a->size)) {
  7215. err = MP_VAL;
  7216. }
  7217. if (err == MP_OKAY) {
  7218. /* Move up digits. */
  7219. XMEMMOVE(a->dp + s, a->dp, a->used * SP_WORD_SIZEOF);
  7220. /* Back fill with zeros. */
  7221. XMEMSET(a->dp, 0, s * SP_WORD_SIZEOF);
  7222. /* Update used. */
  7223. a->used += s;
  7224. /* Remove leading zeros. */
  7225. sp_clamp(a);
  7226. }
  7227. return err;
  7228. }
  7229. #endif
  7230. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  7231. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  7232. !defined(WOLFSSL_RSA_PUBLIC_ONLY))
  7233. /* Left shift the multi-precision number by n bits.
  7234. * Bits may be larger than the word size.
  7235. *
  7236. * Used by sp_mul_2d() and other internal functions.
  7237. *
  7238. * @param [in,out] a SP integer to shift.
  7239. * @param [in] n Number of bits to shift left.
  7240. *
  7241. * @return MP_OKAY on success.
  7242. */
static int sp_lshb(sp_int* a, int n)
{
    int err = MP_OKAY;

    /* Zero (no used digits) shifted left is still zero - nothing to do.
     * NOTE(review): n is assumed non-negative; internal callers pass
     * computed non-negative counts - confirm for any new caller. */
    if (a->used != 0) {
        /* Calculate number of digits to shift. */
        int s = n >> SP_WORD_SHIFT;
        int i;

        /* Ensure number has enough digits for result.
         * '>=' since one extra digit may be needed for bits shifted out of
         * the current top digit. */
        if (a->used + s >= a->size) {
            err = MP_VAL;
        }
        if (err == MP_OKAY) {
            /* Get count of bits to move in digit. */
            n &= SP_WORD_MASK;
            /* Check whether this is a complicated case. */
            if (n != 0) {
                /* Shift up starting at most significant digit. */
                /* Get new most significant digit - bits shifted out the top
                 * of the current top digit. */
                sp_int_digit v = a->dp[a->used - 1] >> (SP_WORD_SIZE - n);
                /* Shift up each digit, pulling low bits from the digit
                 * below. Iterate from the top so digits are not clobbered
                 * before they are read (in-place shift). */
                for (i = a->used - 1; i >= 1; i--) {
                    a->dp[i + s] = (a->dp[i] << n) |
                                   (a->dp[i - 1] >> (SP_WORD_SIZE - n));
                }
                /* Shift up least significant digit. */
                a->dp[s] = a->dp[0] << n;
                /* Add new high digit unless zero. */
                if (v != 0) {
                    a->dp[a->used + s] = v;
                    a->used++;
                }
            }
            /* Only digits to move and ensure not zero. */
            else if (s > 0) {
                /* Move up digits. */
                XMEMMOVE(a->dp + s, a->dp, a->used * SP_WORD_SIZEOF);
            }

            /* Update used digit count. */
            a->used += s;
            /* Back fill with zeros. */
            XMEMSET(a->dp, 0, SP_WORD_SIZEOF * s);
        }
    }

    return err;
}
  7288. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC ||
  7289. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  7290. #ifdef WOLFSSL_SP_MATH_ALL
/* Shift a right by c digits (in place): a = a >> (c * SP_WORD_SIZE)
 *
 * @param [in,out] a SP integer to shift.
 * @param [in]     c Number of digits to shift.
 */
  7297. void sp_rshd(sp_int* a, int c)
  7298. {
  7299. /* Do shift if we have an SP int. */
  7300. if (a != NULL) {
  7301. /* Make zero if shift removes all digits. */
  7302. if (c >= a->used) {
  7303. _sp_zero(a);
  7304. }
  7305. else {
  7306. int i;
  7307. /* Update used digits count. */
  7308. a->used -= c;
  7309. /* Move digits down. */
  7310. for (i = 0; i < a->used; i++, c++) {
  7311. a->dp[i] = a->dp[c];
  7312. }
  7313. }
  7314. }
  7315. }
  7316. #endif /* WOLFSSL_SP_MATH_ALL */
  7317. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  7318. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  7319. defined(WOLFSSL_HAVE_SP_DH)
/* Shift a right by n bits into r: r = a >> n
 *
 * @param [in]  a SP integer to shift.
 * @param [in]  n Number of bits to shift. Must not be negative.
 * @param [out] r SP integer to store result in. Assumed non-NULL
 *              (not validated here - callers guarantee it).
 *
 * @return MP_OKAY on success.
 * @return MP_VAL when a is NULL, n is negative, or r is too small for the
 *         result.
 */
int sp_rshb(const sp_int* a, int n, sp_int* r)
{
    int err = MP_OKAY;
    /* Number of digits to shift down. */
    int i = n >> SP_WORD_SHIFT;

    if ((a == NULL) || (n < 0)) {
        err = MP_VAL;
    }
    /* Handle case where shifting out all digits. */
    if ((err == MP_OKAY) && (i >= a->used)) {
        _sp_zero(r);
    }
    /* Change callers when more error cases returned. */
    else if ((err == MP_OKAY) && (a->used - i > r->size)) {
        err = MP_VAL;
    }
    else if (err == MP_OKAY) {
        int j;

        /* Number of bits to shift in digits. */
        n &= SP_WORD_SIZE - 1;
        /* Handle simple case: shift by a whole number of digits. */
        if (n == 0) {
            /* Set the count of used digits. */
            r->used = a->used - i;
            /* Move digits down. */
            if (r == a) {
                /* Regions overlap when shifting in place. */
                XMEMMOVE(r->dp, r->dp + i, SP_WORD_SIZEOF * r->used);
            }
            else {
                XMEMCPY(r->dp, a->dp + i, SP_WORD_SIZEOF * r->used);
            }
        }
        else {
            /* Move the bits down starting at least significant digit.
             * Each result digit combines the high bits of a->dp[i] with the
             * low bits of a->dp[i+1]. */
            for (j = 0; i < a->used-1; i++, j++)
                r->dp[j] = (a->dp[i] >> n) | (a->dp[i+1] << (SP_WORD_SIZE - n));
            /* Most significant digit has no higher digit to pull from. */
            r->dp[j] = a->dp[i] >> n;
            /* Set the count of used digits - drop top digit when zero. */
            r->used = j + (r->dp[j] > 0);
        }
#ifdef WOLFSSL_SP_INT_NEGATIVE
        if (sp_iszero(r)) {
            /* Set zero sign. */
            r->sign = MP_ZPOS;
        }
        else {
            /* Retain sign. */
            r->sign = a->sign;
        }
#endif
    }

    return err;
}
  7380. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC ||
  7381. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) || WOLFSSL_HAVE_SP_DH */
  7382. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  7383. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  7384. !defined(WOLFSSL_RSA_PUBLIC_ONLY))
/* Handle division step where the remaining dividend digits are compared
 * against the divisor at the same position: when the top of the dividend is
 * greater than or equal to the divisor, subtract the divisor once from the
 * top of the dividend and record it in the quotient.
 *
 * NOTE(review): assumes r->dp[a->used - d->used] has already been
 * initialized (callers zero the quotient digits first) - confirm against
 * callers.
 *
 * @param [in,out] a SP integer dividend; top reduced by d when >= d.
 * @param [in]     d SP integer divisor.
 * @param [in,out] r SP integer quotient being accumulated.
 */
static void _sp_div_same_size(sp_int* a, const sp_int* d, sp_int* r)
{
    int i;

    /* Compare top digits of dividend with those of divisor up to last. */
    for (i = d->used - 1; i > 0; i--) {
        /* Break if top divisor is not equal to dividend. */
        if (a->dp[a->used - d->used + i] != d->dp[i]) {
            break;
        }
    }
    /* Check if top dividend is greater than or equal to divisor. */
    if (a->dp[a->used - d->used + i] >= d->dp[i]) {
        /* Update quotient result. */
        r->dp[a->used - d->used] += 1;
        /* Get 'used' to restore - ensure zeros put into quotient. */
        i = a->used;
        /* Subtract d from top of a. */
        _sp_sub_off(a, d, a, a->used - d->used);
        /* Restore 'used' on remainder. */
        a->used = i;
    }
}
  7407. /* Divide a by d and return the quotient in r and the remainder in a.
  7408. * r = a / d; a = a % d
  7409. *
  7410. * Note: a is constantly having multiplies of d subtracted.
  7411. *
  7412. * @param [in, out] a SP integer to be divided and remainder on out.
  7413. * @param [in] d SP integer to divide by.
  7414. * @param [out] r SP integer that is the quotient.
  7415. * @param [out] trial SP integer that is product in trial division.
  7416. *
  7417. * @return MP_OKAY on success.
  7418. * @return MP_VAL when operation fails - only when compiling small code.
  7419. */
static int _sp_div(sp_int* a, const sp_int* d, sp_int* r, sp_int* trial)
{
    int err = MP_OKAY;
    int i;
#ifdef WOLFSSL_SP_SMALL
    int c;
#else
    int j;
    int o;
#ifndef SQR_MUL_ASM
    sp_int_sword sw;
#else
    sp_int_digit sl;
    sp_int_digit sh;
    sp_int_digit st;
#endif
#endif /* WOLFSSL_SP_SMALL */
    sp_int_digit t;
    sp_int_digit dt;

    /* Set result size to clear. */
    r->used = a->used - d->used + 1;
    /* Set all potentially used digits to zero. */
    for (i = 0; i < r->used; i++) {
        r->dp[i] = 0;
    }
#ifdef WOLFSSL_SP_INT_NEGATIVE
    r->sign = MP_ZPOS;
#endif
    /* Get the most significant digit (will have top bit set). */
    dt = d->dp[d->used-1];
    /* Handle when a >= d ^ (2 ^ (SP_WORD_SIZE * x)). */
    _sp_div_same_size(a, d, r);

    /* Keep subtracting multiples of d as long as the digit count of a is
     * greater than equal to d.
     */
    for (i = a->used - 1; i >= d->used; i--) {
        /* When top digits equal, guestimate maximum multiplier.
         * Worst case, multiplier is actually SP_DIGIT_MAX - 1.
         * That is, for w (word size in bits) > 1, n > 1, let:
         *   a = 2^((n+1)*w-1), d = 2^(n*w-1) + 2^((n-1)*w) - 1, t = 2^w - 2
         * Then,
         *   d * t
         *   = (2^(n*w-1) + 2^((n-1)*w) - 1) * (2^w - 2)
         *   = 2^((n+1)*w-1) - 2^(n*w) + 2^(n*w) - 2^((n-1)*w+1) - 2^w + 2
         *   = 2^((n+1)*w-1) - 2^((n-1)*w+1) - 2^w + 2
         *   = a - 2^((n-1)*w+1) - 2^w + 2
         *   d > 2^((n-1)*w+1) + 2^w - 2, when w > 1, n > 1
         */
        if (a->dp[i] == dt) {
            t = SP_DIGIT_MAX;
        }
        else {
            /* Calculate trial quotient by dividing top word of dividend by
             * top digit of divisor.
             * Some implementations segfault when quotient > SP_DIGIT_MAX.
             * Implementations in assembly, using builtins or using
             * digits only (WOLFSSL_SP_DIV_WORD_HALF).
             */
            t = sp_div_word(a->dp[i], a->dp[i-1], dt);
        }
#ifdef WOLFSSL_SP_SMALL
        do {
            /* Calculate trial from trial quotient. */
            err = _sp_mul_d(d, t, trial, i - d->used);
            if (err != MP_OKAY) {
                break;
            }
            /* Check if trial is bigger. */
            c = _sp_cmp_abs(trial, a);
            if (c == MP_GT) {
                /* Decrement trial quotient and try again. */
                t--;
            }
        }
        while (c == MP_GT);
        if (err != MP_OKAY) {
            break;
        }

        /* Subtract the trial and add quotient to result. */
        _sp_sub_off(a, trial, a, 0);
        r->dp[i - d->used] += t;
        /* Handle overflow of digit. */
        if (r->dp[i - d->used] < t) {
            r->dp[i + 1 - d->used]++;
        }
#else
        /* Index of lowest digit trial is subtracted from. */
        o = i - d->used;
        do {
#ifndef SQR_MUL_ASM
            sp_int_word tw = 0;
#else
            sp_int_digit tl = 0;
            sp_int_digit th = 0;
#endif
            /* Multiply divisor by trial quotient. */
            for (j = 0; j < d->used; j++) {
#ifndef SQR_MUL_ASM
                tw += (sp_int_word)d->dp[j] * t;
                trial->dp[j] = (sp_int_digit)tw;
                tw >>= SP_WORD_SIZE;
#else
                SP_ASM_MUL_ADD_NO(tl, th, d->dp[j], t);
                trial->dp[j] = tl;
                tl = th;
                th = 0;
#endif
            }
            /* Store carry as top digit of trial. */
#ifndef SQR_MUL_ASM
            trial->dp[j] = (sp_int_digit)tw;
#else
            trial->dp[j] = tl;
#endif

            /* Check trial quotient isn't larger than dividend. */
            for (j = d->used; j > 0; j--) {
                if (trial->dp[j] != a->dp[j + o]) {
                    break;
                }
            }
            /* Decrement trial quotient if larger and try again. */
            if (trial->dp[j] > a->dp[j + o]) {
                t--;
            }
        }
        while (trial->dp[j] > a->dp[j + o]);

#ifndef SQR_MUL_ASM
        sw = 0;
#else
        sl = 0;
        sh = 0;
#endif
        /* Subtract trial - don't need to update used. */
        for (j = 0; j <= d->used; j++) {
#ifndef SQR_MUL_ASM
            sw += a->dp[j + o];
            sw -= trial->dp[j];
            a->dp[j + o] = (sp_int_digit)sw;
            sw >>= SP_WORD_SIZE;
#else
            st = a->dp[j + o];
            SP_ASM_ADDC(sl, sh, st);
            st = trial->dp[j];
            SP_ASM_SUBB(sl, sh, st);
            a->dp[j + o] = sl;
            sl = sh;
            /* Sign extend borrow into high digit. */
            sh = (sp_int_digit)0 - (sl >> (SP_WORD_SIZE - 1));
#endif
        }

        /* Store trial quotient digit in result. */
        r->dp[o] = t;
#endif /* WOLFSSL_SP_SMALL */
    }
    /* Update used. */
    a->used = i + 1;
    if (a->used == d->used) {
        /* Finish div now that length of dividend is same as divisor. */
        _sp_div_same_size(a, d, r);
    }

    return err;
}
  7579. /* Divide a by d and return the quotient in r and the remainder in rem.
  7580. * r = a / d; rem = a % d
  7581. *
  7582. * @param [in] a SP integer to be divided.
  7583. * @param [in] d SP integer to divide by.
  7584. * @param [out] r SP integer that is the quotient.
  7585. * @param [out] rem SP integer that is the remainder.
  7586. *
  7587. * @return MP_OKAY on success.
  7588. * @return MP_VAL when a or d is NULL, r and rem are NULL, or d is 0.
  7589. * @return MP_MEM when dynamic memory allocation fails.
  7590. */
int sp_div(const sp_int* a, const sp_int* d, sp_int* r, sp_int* rem)
{
    int err = MP_OKAY;
    int ret;
    int done = 0;
    int s = 0;
    sp_int* sa = NULL;
    sp_int* sd = NULL;
    sp_int* tr = NULL;
    sp_int* trial = NULL;
#ifdef WOLFSSL_SP_INT_NEGATIVE
    int signA = MP_ZPOS;
    int signD = MP_ZPOS;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
    /* Intermediates will always be less than or equal to dividend. */
    DECL_SP_INT_ARRAY(td, (a == NULL) ? 1 : a->used + 1, 4);

    /* Validate parameters. */
    if ((a == NULL) || (d == NULL) || ((r == NULL) && (rem == NULL))) {
        err = MP_VAL;
    }
    /* a / 0 = infinity. */
    if ((err == MP_OKAY) && sp_iszero(d)) {
        err = MP_VAL;
    }
    /* Ensure quotient result has enough memory. */
    if ((err == MP_OKAY) && (r != NULL) && (r->size < a->used - d->used + 2)) {
        err = MP_VAL;
    }
    if ((err == MP_OKAY) && (rem != NULL)) {
        /* Ensure remainder has enough memory. */
        if ((a->used <= d->used) && (rem->size < a->used + 1)) {
            err = MP_VAL;
        }
        else if ((a->used > d->used) && (rem->size < d->used + 1)) {
            err = MP_VAL;
        }
    }
    /* May need to shift number being divided left into a new word. */
    if ((err == MP_OKAY) && (a->used == SP_INT_DIGITS)) {
        /* Bits the divisor will be shifted up by for normalization. */
        int bits = SP_WORD_SIZE - (sp_count_bits(d) % SP_WORD_SIZE);
        if ((bits != SP_WORD_SIZE) &&
            (sp_count_bits(a) + bits > SP_INT_DIGITS * SP_WORD_SIZE)) {
            err = MP_VAL;
        }
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
        sp_print(d, "b");
    }
#endif

    if (err == MP_OKAY) {
#ifdef WOLFSSL_SP_INT_NEGATIVE
        /* Cache sign for results. */
        signA = a->sign;
        signD = d->sign;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
        /* Handle simple case of: dividend < divisor. */
        ret = _sp_cmp_abs(a, d);
        if (ret == MP_LT) {
            /* a = 0 * d + a */
            if (rem != NULL) {
                sp_copy(a, rem);
            }
            if (r != NULL) {
                sp_set(r, 0);
            }
            done = 1;
        }
        /* Handle simple case of: dividend == divisor. */
        else if (ret == MP_EQ) {
            /* a = 1 * d + 0 */
            if (rem != NULL) {
                sp_set(rem, 0);
            }
            if (r != NULL) {
                sp_set(r, 1);
#ifdef WOLFSSL_SP_INT_NEGATIVE
                r->sign = (signA == signD) ? MP_ZPOS : MP_NEG;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
            }
            done = 1;
        }
        else if (sp_count_bits(a) == sp_count_bits(d)) {
            /* a is greater than d but same bit length - subtract. */
            if (rem != NULL) {
                _sp_sub_off(a, d, rem, 0);
#ifdef WOLFSSL_SP_INT_NEGATIVE
                rem->sign = signA;
#endif
            }
            if (r != NULL) {
                sp_set(r, 1);
#ifdef WOLFSSL_SP_INT_NEGATIVE
                r->sign = (signA == signD) ? MP_ZPOS : MP_NEG;
#endif /* WOLFSSL_SP_INT_NEGATIVE */
            }
            done = 1;
        }
    }

    /* Allocate temporary 'sp_int's and assign. */
    if ((!done) && (err == MP_OKAY)) {
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
        int cnt = 4;
        /* Reuse remainder sp_int where possible. */
        if ((rem != NULL) && (rem != d) && (rem->size > a->used)) {
            sa = rem;
            cnt--;
        }
        /* Reuse result sp_int where possible. */
        if ((r != NULL) && (r != d)) {
            tr = r;
            cnt--;
        }
        /* Macro always has code associated with it and checks err first. */
        ALLOC_SP_INT_ARRAY(td, a->used + 1, cnt, err, NULL);
#else
        ALLOC_SP_INT_ARRAY(td, a->used + 1, 4, err, NULL);
#endif
    }
    if ((!done) && (err == MP_OKAY)) {
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
        int i = 2;

        /* Set to temporary when not reusing. */
        if (sa == NULL) {
            sa = td[i++];
        }
        if (tr == NULL) {
            tr = td[i];
        }
#else
        sa    = td[2];
        tr    = td[3];
#endif
        sd    = td[0];
        trial = td[1];

        /* Initialize sizes to minimal values. */
        _sp_init_size(sd, d->used + 1);
        _sp_init_size(trial, a->used + 1);
#if (defined(WOLFSSL_SMALL_STACK) || defined(SP_ALLOC)) && \
    !defined(WOLFSSL_SP_NO_MALLOC)
        if (sa != rem) {
            _sp_init_size(sa, a->used + 1);
        }
        if (tr != r) {
            _sp_init_size(tr, a->used - d->used + 2);
        }
#else
        _sp_init_size(sa, a->used + 1);
        _sp_init_size(tr, a->used - d->used + 2);
#endif

        /* Move divisor to top of word. Adjust dividend as well. */
        s = sp_count_bits(d);
        s = SP_WORD_SIZE - (s & SP_WORD_MASK);
        sp_copy(a, sa);
        /* Only shift if top bit of divisor not set. */
        if (s != SP_WORD_SIZE) {
            err = sp_lshb(sa, s);
            if (err == MP_OKAY) {
                sp_copy(d, sd);
                d = sd;
                err = sp_lshb(sd, s);
            }
        }
    }
    if ((!done) && (err == MP_OKAY) && (d->used > 0)) {
        /* Do division: tr = sa / d, sa = sa % d. */
        err = _sp_div(sa, d, tr, trial);
        /* Return the remainder if required. */
        if ((err == MP_OKAY) && (rem != NULL)) {
            /* Move result back down if moved up for divisor value. */
            if (s != SP_WORD_SIZE) {
                (void)sp_rshb(sa, s, sa);
            }
            sp_copy(sa, rem);
            sp_clamp(rem);
#ifdef WOLFSSL_SP_INT_NEGATIVE
            rem->sign = (rem->used == 0) ? MP_ZPOS : signA;
#endif
        }
        /* Return the quotient if required. */
        if ((err == MP_OKAY) && (r != NULL)) {
            sp_copy(tr, r);
            sp_clamp(r);
#ifdef WOLFSSL_SP_INT_NEGATIVE
            if ((r->used == 0) || (signA == signD)) {
                r->sign = MP_ZPOS;
            }
            else {
                r->sign = MP_NEG;
            }
#endif /* WOLFSSL_SP_INT_NEGATIVE */
        }
    }

#if 0
    if (err == MP_OKAY) {
        if (rem != NULL) {
            sp_print(rem, "rdr");
        }
        if (r != NULL) {
            sp_print(r, "rdw");
        }
    }
#endif

    FREE_SP_INT_ARRAY(td, NULL);
    return err;
}
  7800. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC || \
  7801. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  7802. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(HAVE_ECC) || \
  7803. (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  7804. !defined(WOLFSSL_RSA_PUBLIC_ONLY))
  7805. #ifndef FREESCALE_LTC_TFM
  7806. /* Calculate the remainder of dividing a by m: r = a mod m.
  7807. *
  7808. * @param [in] a SP integer to reduce.
  7809. * @param [in] m SP integer that is the modulus.
  7810. * @param [out] r SP integer to store result in.
  7811. *
  7812. * @return MP_OKAY on success.
  7813. * @return MP_VAL when a, m or r is NULL or m is 0.
  7814. */
int sp_mod(const sp_int* a, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
#ifdef WOLFSSL_SP_INT_NEGATIVE
    /* Remainder will start as a. */
    DECL_SP_INT(t, (a == NULL) ? 1 : a->used + 1);
#endif /* WOLFSSL_SP_INT_NEGATIVE */

    /* Validate parameters. */
    if ((a == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Ensure a isn't too big a number to operate on. */
    else if (a->used >= SP_INT_DIGITS) {
        err = MP_VAL;
    }

#ifndef WOLFSSL_SP_INT_NEGATIVE
    if (err == MP_OKAY) {
        /* Use divide to calculate remainder and don't get quotient. */
        err = sp_div(a, m, NULL, r);
    }
#else
    /* In case remainder is modulus - allocate temporary. */
    ALLOC_SP_INT(t, a->used + 1, err, NULL);
    if (err == MP_OKAY) {
        _sp_init_size(t, a->used + 1);
        /* Use divide to calculate remainder and don't get quotient. */
        err = sp_div(a, m, NULL, t);
    }
    if (err == MP_OKAY) {
        /* Make remainder positive and copy into result.
         * sp_div leaves the remainder with the dividend's sign; add the
         * modulus when the signs differ so the result matches m's sign. */
        if ((!sp_iszero(t)) && (t->sign != m->sign)) {
            err = sp_add(t, m, r);
        }
        else {
            err = sp_copy(t, r);
        }
    }
    FREE_SP_INT(t, NULL);
#endif /* WOLFSSL_SP_INT_NEGATIVE */

    return err;
}
  7856. #endif /* !FREESCALE_LTC_TFM */
  7857. #endif /* WOLFSSL_SP_MATH_ALL || !NO_DH || HAVE_ECC || \
  7858. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  7859. /* START SP_MUL implementations. */
  7860. /* This code is generated.
  7861. * To generate:
  7862. * cd scripts/sp/sp_int
  7863. * ./gen.sh
  7864. * File sp_mul.c contains code.
  7865. */
  7866. #ifdef SQR_MUL_ASM
/* Multiply a by b into r where a and b have same no. digits. r = a * b
 *
 * Optimised code for when number of digits in a and b are the same.
 *
 * @param [in]  a SP integer to multiply.
 * @param [in]  b SP integer to multiply by.
 * @param [out] r SP integer to hold result.
 *
 * @return MP_OKAY on success.
 * @return MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul_nxn(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    int j;
    int k;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Temporary result digits - heap allocated. */
    sp_int_digit* t = NULL;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
    /* Temporary result digits - exact-size VLA on stack. */
    sp_int_digit t[a->used * 2];
#else
    /* Temporary result digits - maximum-size stack array. */
    sp_int_digit t[SP_INT_DIGITS];
#endif

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * (a->used * 2), NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o;
        const sp_int_digit* dp;

        /* Compute least significant result digit from a[0] * b[0]. */
        h = 0;
        l = 0;
        SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
        t[0] = h;
        h = 0;
        o = 0;
        /* Each result digit k sums products a[i]*b[j] with i+j == k.
         * First half: columns ending at digit 0 of a. */
        for (k = 1; k <= a->used - 1; k++) {
            j = k;
            dp = a->dp;
            for (; j >= 0; dp++, j--) {
                SP_ASM_MUL_ADD(l, h, o, dp[0], b->dp[j]);
            }
            t[k] = l;
            /* Shift accumulator down one digit for next column. */
            l = h;
            h = o;
            o = 0;
        }
        /* Second half: columns starting above digit 0. */
        for (; k <= (a->used - 1) * 2; k++) {
            i = k - (b->used - 1);
            dp = &b->dp[b->used - 1];
            for (; i < a->used; i++, dp--) {
                SP_ASM_MUL_ADD(l, h, o, a->dp[i], dp[0]);
            }
            t[k] = l;
            l = h;
            h = o;
            o = 0;
        }
        t[k] = l;
        /* Copy temporary digits into result and trim leading zeros. */
        r->used = k + 1;
        XMEMCPY(r->dp, t, r->used * sizeof(sp_int_digit));
        sp_clamp(r);
    }

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
/* Multiply a by b into r. r = a * b
 *
 * @param [in]  a SP integer to multiply.
 * @param [in]  b SP integer to multiply by.
 * @param [out] r SP integer to hold result.
 *
 * @return MP_OKAY on success.
 * @return MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    int j;
    int k;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Temporary result digits - heap allocated. */
    sp_int_digit* t = NULL;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
    /* Temporary result digits - exact-size VLA on stack. */
    sp_int_digit t[a->used + b->used];
#else
    /* Temporary result digits - maximum-size stack array. */
    sp_int_digit t[SP_INT_DIGITS];
#endif

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * (a->used + b->used), NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o;

        /* Compute least significant result digit from a[0] * b[0]. */
        h = 0;
        l = 0;
        SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
        t[0] = h;
        h = 0;
        o = 0;
        /* Each result digit k sums products a[i]*b[j] with i+j == k.
         * First half: columns starting at digit 0 of a. */
        for (k = 1; k <= b->used - 1; k++) {
            i = 0;
            j = k;
            for (; (i < a->used) && (j >= 0); i++, j--) {
                SP_ASM_MUL_ADD(l, h, o, a->dp[i], b->dp[j]);
            }
            t[k] = l;
            /* Shift accumulator down one digit for next column. */
            l = h;
            h = o;
            o = 0;
        }
        /* Second half: columns starting above digit 0 of a. */
        for (; k <= (a->used - 1) + (b->used - 1); k++) {
            j = b->used - 1;
            i = k - j;
            for (; (i < a->used) && (j >= 0); i++, j--) {
                SP_ASM_MUL_ADD(l, h, o, a->dp[i], b->dp[j]);
            }
            t[k] = l;
            l = h;
            h = o;
            o = 0;
        }
        t[k] = l;
        /* Copy temporary digits into result and trim leading zeros. */
        r->used = k + 1;
        XMEMCPY(r->dp, t, r->used * sizeof(sp_int_digit));
        sp_clamp(r);
    }

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  8018. #else
/* Multiply a by b into r. r = a * b
 *
 * @param  [in]  a  SP integer to multiply.
 * @param  [in]  b  SP integer to multiply by.
 * @param  [out] r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    int j;
    int k;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Scratch buffer for the full product - heap allocated below. */
    sp_int_digit* t = NULL;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
    /* C99 variable-length array sized exactly to the product length. */
    sp_int_digit t[a->used + b->used];
#else
    /* Maximum-size scratch buffer on the stack. */
    sp_int_digit t[SP_INT_DIGITS];
#endif
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * (a->used + b->used), NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        /* w holds one double-width partial product; (l, h[, o]) accumulate
         * the current column's sum across double-width words. */
        sp_int_word w;
        sp_int_word l;
        sp_int_word h;
#ifdef SP_WORD_OVERFLOW
        /* Extra accumulator used when enough partial products could be
         * summed to overflow a double-width word. */
        sp_int_word o;
#endif

        /* Column 0: single product of the least significant digits. */
        w = (sp_int_word)a->dp[0] * b->dp[0];
        t[0] = (sp_int_digit)w;
        l = (sp_int_digit)(w >> SP_WORD_SIZE);
        h = 0;
#ifdef SP_WORD_OVERFLOW
        o = 0;
#endif
        /* Each column k sums a->dp[i] * b->dp[j] over all i + j == k. */
        for (k = 1; k <= (a->used - 1) + (b->used - 1); k++) {
            i = k - (b->used - 1);
            /* Branch-free clamp of negative i to 0: when i < 0 the sign-bit
             * shift yields 1, so the mask becomes 0; otherwise all ones. */
            i &= (((unsigned int)i >> (sizeof(i) * 8 - 1)) - 1U);
            j = k - i;
            for (; (i < a->used) && (j >= 0); i++, j--) {
                w = (sp_int_word)a->dp[i] * b->dp[j];
                /* Add low and high halves of the product separately. */
                l += (sp_int_digit)w;
                h += (sp_int_digit)(w >> SP_WORD_SIZE);
#ifdef SP_WORD_OVERFLOW
                /* Propagate carries out of l and h immediately so the
                 * double-width accumulators cannot overflow. */
                h += (sp_int_digit)(l >> SP_WORD_SIZE);
                l &= SP_MASK;
                o += (sp_int_digit)(h >> SP_WORD_SIZE);
                h &= SP_MASK;
#endif
            }
            /* Store finished column digit; shift accumulator down a digit. */
            t[k] = (sp_int_digit)l;
            l >>= SP_WORD_SIZE;
            l += (sp_int_digit)h;
            h >>= SP_WORD_SIZE;
#ifdef SP_WORD_OVERFLOW
            h += o & SP_MASK;
            o >>= SP_WORD_SIZE;
#endif
        }
        /* Final carry is the most significant digit of the product. */
        t[k] = (sp_int_digit)l;
        r->used = k + 1;
        XMEMCPY(r->dp, t, r->used * sizeof(sp_int_digit));
        /* Drop leading zero digits. */
        sp_clamp(r);
    }
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  8099. #endif
  8100. #ifndef WOLFSSL_SP_SMALL
  8101. #if !defined(WOLFSSL_HAVE_SP_ECC) && defined(HAVE_ECC)
  8102. #if SP_WORD_SIZE == 64
  8103. #ifndef SQR_MUL_ASM
  8104. /* Multiply a by b and store in r: r = a * b
  8105. *
  8106. * Long-hand implementation.
  8107. *
  8108. * @param [in] a SP integer to multiply.
  8109. * @param [in] b SP integer to multiply.
  8110. * @param [out] r SP integer result.
  8111. *
  8112. * @return MP_OKAY on success.
  8113. * @return MP_MEM when dynamic memory allocation fails.
  8114. */
static int _sp_mul_4(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    sp_int_word* w = NULL;
#else
    /* One double-width word per partial product: 4 x 4 digits = 16. */
    sp_int_word w[16];
#endif
    const sp_int_digit* da = a->dp;
    const sp_int_digit* db = b->dp;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    w = (sp_int_word*)XMALLOC(sizeof(sp_int_word) * 16, NULL,
        DYNAMIC_TYPE_BIGINT);
    if (w == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        /* All 16 digit products, grouped by output column (i + j):
         *   w[0]      column 0    w[6..9]   column 3
         *   w[1..2]   column 1    w[10..12] column 4
         *   w[3..5]   column 2    w[13..14] column 5
         *                         w[15]     column 6 */
        w[0] = (sp_int_word)da[0] * db[0];
        w[1] = (sp_int_word)da[0] * db[1];
        w[2] = (sp_int_word)da[1] * db[0];
        w[3] = (sp_int_word)da[0] * db[2];
        w[4] = (sp_int_word)da[1] * db[1];
        w[5] = (sp_int_word)da[2] * db[0];
        w[6] = (sp_int_word)da[0] * db[3];
        w[7] = (sp_int_word)da[1] * db[2];
        w[8] = (sp_int_word)da[2] * db[1];
        w[9] = (sp_int_word)da[3] * db[0];
        w[10] = (sp_int_word)da[1] * db[3];
        w[11] = (sp_int_word)da[2] * db[2];
        w[12] = (sp_int_word)da[3] * db[1];
        w[13] = (sp_int_word)da[2] * db[3];
        w[14] = (sp_int_word)da[3] * db[2];
        w[15] = (sp_int_word)da[3] * db[3];
        /* w[0] is reused as the running column accumulator/carry from here.
         * Per column: add the low halves of that column's products, store
         * the digit, then shift and fold in their high halves for the next
         * column. */
        r->dp[0] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 1: low halves of w[1], w[2]. */
        w[0] += (sp_int_digit)w[1];
        w[0] += (sp_int_digit)w[2];
        r->dp[1] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 2: high halves of column 1, low halves of w[3..5]. */
        w[1] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[1];
        w[2] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[2];
        w[0] += (sp_int_digit)w[3];
        w[0] += (sp_int_digit)w[4];
        w[0] += (sp_int_digit)w[5];
        r->dp[2] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 3: high halves of column 2, low halves of w[6..9]. */
        w[3] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[3];
        w[4] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[4];
        w[5] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[5];
        w[0] += (sp_int_digit)w[6];
        w[0] += (sp_int_digit)w[7];
        w[0] += (sp_int_digit)w[8];
        w[0] += (sp_int_digit)w[9];
        r->dp[3] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 4: high halves of column 3, low halves of w[10..12]. */
        w[6] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[6];
        w[7] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[7];
        w[8] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[8];
        w[9] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[9];
        w[0] += (sp_int_digit)w[10];
        w[0] += (sp_int_digit)w[11];
        w[0] += (sp_int_digit)w[12];
        r->dp[4] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 5: high halves of column 4, low halves of w[13..14]. */
        w[10] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[10];
        w[11] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[11];
        w[12] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[12];
        w[0] += (sp_int_digit)w[13];
        w[0] += (sp_int_digit)w[14];
        r->dp[5] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 6: high halves of column 5, low half of w[15]. */
        w[13] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[13];
        w[14] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[14];
        w[0] += (sp_int_digit)w[15];
        r->dp[6] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 7: final carry plus high half of w[15]. */
        w[15] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[15];
        r->dp[7] = w[0];
        r->used = 8;
        /* Drop leading zero digits. */
        sp_clamp(r);
    }
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (w != NULL) {
        XFREE(w, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  8219. #else /* SQR_MUL_ASM */
  8220. /* Multiply a by b and store in r: r = a * b
  8221. *
  8222. * Comba implementation.
  8223. *
  8224. * @param [in] a SP integer to multiply.
  8225. * @param [in] b SP integer to multiply.
  8226. * @param [out] r SP integer result.
  8227. *
  8228. * @return MP_OKAY on success.
  8229. * @return MP_MEM when dynamic memory allocation fails.
  8230. */
static int _sp_mul_4(const sp_int* a, const sp_int* b, sp_int* r)
{
    /* Three-digit column accumulator: l = current digit, h = next, o = next
     * again. Column k sums a->dp[i] * b->dp[j] over all i + j == k. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* Low 4 result digits are buffered so r may alias a or b; upper digits
     * are written to r directly once the inputs they'd overwrite are no
     * longer needed. */
    sp_int_digit t[4];

    /* Column 0. NOTE(review): usage shows SP_ASM_MUL's first output argument
     * receives the LOW digit of the product (hence t[0] = h). */
    SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1 (first add cannot overflow into o). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2. */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
    r->dp[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
    r->dp[5] = l;
    l = h;
    h = o;
    /* Column 6 (single product; no overflow possible). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[3], b->dp[3]);
    r->dp[6] = l;
    /* Column 7 is the final carry. */
    r->dp[7] = h;
    /* Copy the buffered low digits into the result. */
    XMEMCPY(r->dp, t, 4 * sizeof(sp_int_digit));
    r->used = 8;
    /* Drop leading zero digits. */
    sp_clamp(r);
    return MP_OKAY;
}
  8281. #endif /* SQR_MUL_ASM */
  8282. #endif /* SP_WORD_SIZE == 64 */
  8283. #if SP_WORD_SIZE == 64
  8284. #ifdef SQR_MUL_ASM
  8285. /* Multiply a by b and store in r: r = a * b
  8286. *
  8287. * Comba implementation.
  8288. *
  8289. * @param [in] a SP integer to multiply.
  8290. * @param [in] b SP integer to multiply.
  8291. * @param [out] r SP integer result.
  8292. *
  8293. * @return MP_OKAY on success.
  8294. * @return MP_MEM when dynamic memory allocation fails.
  8295. */
static int _sp_mul_6(const sp_int* a, const sp_int* b, sp_int* r)
{
    /* Three-digit column accumulator: l = current digit, h = next, o = next
     * again. Column k sums a->dp[i] * b->dp[j] over all i + j == k. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* Low 6 result digits are buffered so r may alias a or b; upper digits
     * are written to r directly. */
    sp_int_digit t[6];

    /* Column 0. NOTE(review): usage shows SP_ASM_MUL's first output argument
     * receives the LOW digit of the product (hence t[0] = h). */
    SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1 (first add cannot overflow into o). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2. */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[0]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[0]);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6: upper digits now written straight into r. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[1]);
    r->dp[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[2]);
    r->dp[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[3]);
    r->dp[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[4]);
    r->dp[9] = l;
    l = h;
    h = o;
    /* Column 10 (single product; no overflow possible). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[5], b->dp[5]);
    r->dp[10] = l;
    /* Column 11 is the final carry. */
    r->dp[11] = h;
    /* Copy the buffered low digits into the result. */
    XMEMCPY(r->dp, t, 6 * sizeof(sp_int_digit));
    r->used = 12;
    /* Drop leading zero digits. */
    sp_clamp(r);
    return MP_OKAY;
}
  8382. #endif /* SQR_MUL_ASM */
  8383. #endif /* SP_WORD_SIZE == 64 */
  8384. #if SP_WORD_SIZE == 32
  8385. #ifdef SQR_MUL_ASM
  8386. /* Multiply a by b and store in r: r = a * b
  8387. *
  8388. * Comba implementation.
  8389. *
  8390. * @param [in] a SP integer to multiply.
  8391. * @param [in] b SP integer to multiply.
  8392. * @param [out] r SP integer result.
  8393. *
  8394. * @return MP_OKAY on success.
  8395. * @return MP_MEM when dynamic memory allocation fails.
  8396. */
static int _sp_mul_8(const sp_int* a, const sp_int* b, sp_int* r)
{
    /* Three-digit column accumulator: l = current digit, h = next, o = next
     * again. Column k sums a->dp[i] * b->dp[j] over all i + j == k. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* Low 8 result digits are buffered so r may alias a or b; upper digits
     * are written to r directly. */
    sp_int_digit t[8];

    /* Column 0. NOTE(review): usage shows SP_ASM_MUL's first output argument
     * receives the LOW digit of the product (hence t[0] = h). */
    SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1 (first add cannot overflow into o). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2. */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[0]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[0]);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[0]);
    t[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[0]);
    t[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8: upper digits now written straight into r. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[1]);
    r->dp[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[2]);
    r->dp[9] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 10. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[3]);
    r->dp[10] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 11. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[4]);
    r->dp[11] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 12. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[5]);
    r->dp[12] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 13. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[6]);
    r->dp[13] = l;
    l = h;
    h = o;
    /* Column 14 (single product; no overflow possible). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[7], b->dp[7]);
    r->dp[14] = l;
    /* Column 15 is the final carry. */
    r->dp[15] = h;
    /* Copy the buffered low digits into the result. */
    XMEMCPY(r->dp, t, 8 * sizeof(sp_int_digit));
    r->used = 16;
    /* Drop leading zero digits. */
    sp_clamp(r);
    return MP_OKAY;
}
  8527. #endif /* SQR_MUL_ASM */
  8528. #endif /* SP_WORD_SIZE == 32 */
  8529. #if SP_WORD_SIZE == 32
  8530. #ifdef SQR_MUL_ASM
  8531. /* Multiply a by b and store in r: r = a * b
  8532. *
  8533. * Comba implementation.
  8534. *
  8535. * @param [in] a SP integer to multiply.
  8536. * @param [in] b SP integer to multiply.
  8537. * @param [out] r SP integer result.
  8538. *
  8539. * @return MP_OKAY on success.
  8540. * @return MP_MEM when dynamic memory allocation fails.
  8541. */
static int _sp_mul_12(const sp_int* a, const sp_int* b, sp_int* r)
{
    /* Three-digit column accumulator: l = current digit, h = next, o = next
     * again. Column k sums a->dp[i] * b->dp[j] over all i + j == k. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* Low 12 result digits are buffered so r may alias a or b; upper digits
     * are written to r directly. */
    sp_int_digit t[12];

    /* Column 0. NOTE(review): usage shows SP_ASM_MUL's first output argument
     * receives the LOW digit of the product (hence t[0] = h). */
    SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1 (first add cannot overflow into o). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2. */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[0]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[0]);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[0]);
    t[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[0]);
    t[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[0]);
    t[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[0]);
    t[9] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 10. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[0]);
    t[10] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 11. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[1]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[0]);
    t[11] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 12: upper digits now written straight into r. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[2]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[1]);
    r->dp[12] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 13. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[3]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[2]);
    r->dp[13] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 14. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[4]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[3]);
    r->dp[14] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 15. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[5]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[4]);
    r->dp[15] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 16. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[6]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[5]);
    r->dp[16] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 17. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[7]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[6]);
    r->dp[17] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 18. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[8]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[7]);
    r->dp[18] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 19. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[9]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[8]);
    r->dp[19] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 20. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[10]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[9]);
    r->dp[20] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 21. */
    SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[11]);
    SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[10]);
    r->dp[21] = l;
    l = h;
    h = o;
    /* Column 22 (single product; no overflow possible). */
    SP_ASM_MUL_ADD_NO(l, h, a->dp[11], b->dp[11]);
    r->dp[22] = l;
    /* Column 23 is the final carry. */
    r->dp[23] = h;
    /* Copy the buffered low digits into the result. */
    XMEMCPY(r->dp, t, 12 * sizeof(sp_int_digit));
    r->used = 24;
    /* Drop leading zero digits. */
    sp_clamp(r);
    return MP_OKAY;
}
  8784. #endif /* SQR_MUL_ASM */
  8785. #endif /* SP_WORD_SIZE == 32 */
  8786. #endif /* !WOLFSSL_HAVE_SP_ECC && HAVE_ECC */
  8787. #if defined(SQR_MUL_ASM) && (defined(WOLFSSL_SP_INT_LARGE_COMBA) || \
  8788. (!defined(WOLFSSL_SP_MATH) && defined(WOLFCRYPT_HAVE_SAKKE) && \
  8789. (SP_WORD_SIZE == 64)))
  8790. #if SP_INT_DIGITS >= 32
  8791. /* Multiply a by b and store in r: r = a * b
  8792. *
  8793. * Comba implementation.
  8794. *
  8795. * @param [in] a SP integer to multiply.
  8796. * @param [in] b SP integer to multiply.
  8797. * @param [out] r SP integer result.
  8798. *
  8799. * @return MP_OKAY on success.
  8800. * @return MP_MEM when dynamic memory allocation fails.
  8801. */
  8802. static int _sp_mul_16(const sp_int* a, const sp_int* b, sp_int* r)
  8803. {
  8804. int err = MP_OKAY;
  8805. sp_int_digit l = 0;
  8806. sp_int_digit h = 0;
  8807. sp_int_digit o = 0;
  8808. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  8809. sp_int_digit* t = NULL;
  8810. #else
  8811. sp_int_digit t[16];
  8812. #endif
  8813. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  8814. t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * 16, NULL,
  8815. DYNAMIC_TYPE_BIGINT);
  8816. if (t == NULL) {
  8817. err = MP_MEM;
  8818. }
  8819. #endif
  8820. if (err == MP_OKAY) {
  8821. SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
  8822. t[0] = h;
  8823. h = 0;
  8824. SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
  8825. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
  8826. t[1] = l;
  8827. l = h;
  8828. h = o;
  8829. o = 0;
  8830. SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
  8831. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
  8832. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
  8833. t[2] = l;
  8834. l = h;
  8835. h = o;
  8836. o = 0;
  8837. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
  8838. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
  8839. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
  8840. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
  8841. t[3] = l;
  8842. l = h;
  8843. h = o;
  8844. o = 0;
  8845. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[4]);
  8846. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
  8847. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
  8848. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
  8849. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[0]);
  8850. t[4] = l;
  8851. l = h;
  8852. h = o;
  8853. o = 0;
  8854. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[5]);
  8855. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[4]);
  8856. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
  8857. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
  8858. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[1]);
  8859. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[0]);
  8860. t[5] = l;
  8861. l = h;
  8862. h = o;
  8863. o = 0;
  8864. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[6]);
  8865. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[5]);
  8866. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[4]);
  8867. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[3]);
  8868. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[2]);
  8869. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[1]);
  8870. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[0]);
  8871. t[6] = l;
  8872. l = h;
  8873. h = o;
  8874. o = 0;
  8875. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[7]);
  8876. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[6]);
  8877. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[5]);
  8878. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[4]);
  8879. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[3]);
  8880. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[2]);
  8881. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[1]);
  8882. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[0]);
  8883. t[7] = l;
  8884. l = h;
  8885. h = o;
  8886. o = 0;
  8887. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[8]);
  8888. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[7]);
  8889. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[6]);
  8890. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[5]);
  8891. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[4]);
  8892. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[3]);
  8893. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[2]);
  8894. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[1]);
  8895. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[0]);
  8896. t[8] = l;
  8897. l = h;
  8898. h = o;
  8899. o = 0;
  8900. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[9]);
  8901. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[8]);
  8902. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[7]);
  8903. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[6]);
  8904. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[5]);
  8905. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[4]);
  8906. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[3]);
  8907. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[2]);
  8908. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[1]);
  8909. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[0]);
  8910. t[9] = l;
  8911. l = h;
  8912. h = o;
  8913. o = 0;
  8914. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[10]);
  8915. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[9]);
  8916. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[8]);
  8917. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[7]);
  8918. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[6]);
  8919. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[5]);
  8920. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[4]);
  8921. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[3]);
  8922. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[2]);
  8923. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[1]);
  8924. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[0]);
  8925. t[10] = l;
  8926. l = h;
  8927. h = o;
  8928. o = 0;
  8929. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[11]);
  8930. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[10]);
  8931. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[9]);
  8932. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[8]);
  8933. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[7]);
  8934. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[6]);
  8935. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[5]);
  8936. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[4]);
  8937. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[3]);
  8938. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[2]);
  8939. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[1]);
  8940. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[0]);
  8941. t[11] = l;
  8942. l = h;
  8943. h = o;
  8944. o = 0;
  8945. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[12]);
  8946. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[11]);
  8947. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[10]);
  8948. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[9]);
  8949. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[8]);
  8950. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[7]);
  8951. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[6]);
  8952. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[5]);
  8953. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[4]);
  8954. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[3]);
  8955. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[2]);
  8956. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[1]);
  8957. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[0]);
  8958. t[12] = l;
  8959. l = h;
  8960. h = o;
  8961. o = 0;
  8962. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[13]);
  8963. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[12]);
  8964. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[11]);
  8965. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[10]);
  8966. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[9]);
  8967. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[8]);
  8968. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[7]);
  8969. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[6]);
  8970. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[5]);
  8971. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[4]);
  8972. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[3]);
  8973. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[2]);
  8974. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[1]);
  8975. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[0]);
  8976. t[13] = l;
  8977. l = h;
  8978. h = o;
  8979. o = 0;
  8980. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[14]);
  8981. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[13]);
  8982. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[12]);
  8983. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[11]);
  8984. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[10]);
  8985. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[9]);
  8986. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[8]);
  8987. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[7]);
  8988. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[6]);
  8989. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[5]);
  8990. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[4]);
  8991. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[3]);
  8992. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[2]);
  8993. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[1]);
  8994. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[0]);
  8995. t[14] = l;
  8996. l = h;
  8997. h = o;
  8998. o = 0;
  8999. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[15]);
  9000. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[14]);
  9001. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[13]);
  9002. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[12]);
  9003. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[11]);
  9004. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[10]);
  9005. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[9]);
  9006. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[8]);
  9007. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[7]);
  9008. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[6]);
  9009. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[5]);
  9010. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[4]);
  9011. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[3]);
  9012. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[2]);
  9013. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[1]);
  9014. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[0]);
  9015. t[15] = l;
  9016. l = h;
  9017. h = o;
  9018. o = 0;
  9019. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[15]);
  9020. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[14]);
  9021. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[13]);
  9022. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[12]);
  9023. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[11]);
  9024. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[10]);
  9025. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[9]);
  9026. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[8]);
  9027. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[7]);
  9028. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[6]);
  9029. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[5]);
  9030. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[4]);
  9031. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[3]);
  9032. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[2]);
  9033. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[1]);
  9034. r->dp[16] = l;
  9035. l = h;
  9036. h = o;
  9037. o = 0;
  9038. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[15]);
  9039. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[14]);
  9040. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[13]);
  9041. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[12]);
  9042. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[11]);
  9043. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[10]);
  9044. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[9]);
  9045. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[8]);
  9046. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[7]);
  9047. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[6]);
  9048. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[5]);
  9049. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[4]);
  9050. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[3]);
  9051. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[2]);
  9052. r->dp[17] = l;
  9053. l = h;
  9054. h = o;
  9055. o = 0;
  9056. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[15]);
  9057. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[14]);
  9058. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[13]);
  9059. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[12]);
  9060. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[11]);
  9061. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[10]);
  9062. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[9]);
  9063. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[8]);
  9064. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[7]);
  9065. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[6]);
  9066. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[5]);
  9067. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[4]);
  9068. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[3]);
  9069. r->dp[18] = l;
  9070. l = h;
  9071. h = o;
  9072. o = 0;
  9073. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[15]);
  9074. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[14]);
  9075. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[13]);
  9076. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[12]);
  9077. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[11]);
  9078. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[10]);
  9079. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[9]);
  9080. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[8]);
  9081. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[7]);
  9082. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[6]);
  9083. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[5]);
  9084. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[4]);
  9085. r->dp[19] = l;
  9086. l = h;
  9087. h = o;
  9088. o = 0;
  9089. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[15]);
  9090. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[14]);
  9091. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[13]);
  9092. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[12]);
  9093. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[11]);
  9094. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[10]);
  9095. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[9]);
  9096. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[8]);
  9097. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[7]);
  9098. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[6]);
  9099. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[5]);
  9100. r->dp[20] = l;
  9101. l = h;
  9102. h = o;
  9103. o = 0;
  9104. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[15]);
  9105. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[14]);
  9106. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[13]);
  9107. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[12]);
  9108. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[11]);
  9109. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[10]);
  9110. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[9]);
  9111. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[8]);
  9112. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[7]);
  9113. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[6]);
  9114. r->dp[21] = l;
  9115. l = h;
  9116. h = o;
  9117. o = 0;
  9118. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[15]);
  9119. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[14]);
  9120. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[13]);
  9121. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[12]);
  9122. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[11]);
  9123. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[10]);
  9124. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[9]);
  9125. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[8]);
  9126. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[7]);
  9127. r->dp[22] = l;
  9128. l = h;
  9129. h = o;
  9130. o = 0;
  9131. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[15]);
  9132. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[14]);
  9133. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[13]);
  9134. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[12]);
  9135. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[11]);
  9136. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[10]);
  9137. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[9]);
  9138. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[8]);
  9139. r->dp[23] = l;
  9140. l = h;
  9141. h = o;
  9142. o = 0;
  9143. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[15]);
  9144. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[14]);
  9145. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[13]);
  9146. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[12]);
  9147. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[11]);
  9148. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[10]);
  9149. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[9]);
  9150. r->dp[24] = l;
  9151. l = h;
  9152. h = o;
  9153. o = 0;
  9154. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[15]);
  9155. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[14]);
  9156. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[13]);
  9157. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[12]);
  9158. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[11]);
  9159. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[10]);
  9160. r->dp[25] = l;
  9161. l = h;
  9162. h = o;
  9163. o = 0;
  9164. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[15]);
  9165. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[14]);
  9166. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[13]);
  9167. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[12]);
  9168. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[11]);
  9169. r->dp[26] = l;
  9170. l = h;
  9171. h = o;
  9172. o = 0;
  9173. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[15]);
  9174. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[14]);
  9175. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[13]);
  9176. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[12]);
  9177. r->dp[27] = l;
  9178. l = h;
  9179. h = o;
  9180. o = 0;
  9181. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[15]);
  9182. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[14]);
  9183. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[13]);
  9184. r->dp[28] = l;
  9185. l = h;
  9186. h = o;
  9187. o = 0;
  9188. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[15]);
  9189. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[14]);
  9190. r->dp[29] = l;
  9191. l = h;
  9192. h = o;
  9193. SP_ASM_MUL_ADD_NO(l, h, a->dp[15], b->dp[15]);
  9194. r->dp[30] = l;
  9195. r->dp[31] = h;
  9196. XMEMCPY(r->dp, t, 16 * sizeof(sp_int_digit));
  9197. r->used = 32;
  9198. sp_clamp(r);
  9199. }
  9200. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  9201. if (t != NULL) {
  9202. XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
  9203. }
  9204. #endif
  9205. return err;
  9206. }
  9207. #endif /* SP_INT_DIGITS >= 32 */
  9208. #endif /* SQR_MUL_ASM && (WOLFSSL_SP_INT_LARGE_COMBA || (!WOLFSSL_SP_MATH &&
  9209. * WOLFCRYPT_HAVE_SAKKE && SP_WORD_SIZE == 64)) */
  9210. #if defined(SQR_MUL_ASM) && defined(WOLFSSL_SP_INT_LARGE_COMBA)
  9211. #if SP_INT_DIGITS >= 48
  9212. /* Multiply a by b and store in r: r = a * b
  9213. *
  9214. * Comba implementation.
  9215. *
  9216. * @param [in] a SP integer to multiply.
  9217. * @param [in] b SP integer to multiply.
  9218. * @param [out] r SP integer result.
  9219. *
  9220. * @return MP_OKAY on success.
  9221. * @return MP_MEM when dynamic memory allocation fails.
  9222. */
  9223. static int _sp_mul_24(const sp_int* a, const sp_int* b, sp_int* r)
  9224. {
  9225. int err = MP_OKAY;
  9226. sp_int_digit l = 0;
  9227. sp_int_digit h = 0;
  9228. sp_int_digit o = 0;
  9229. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  9230. sp_int_digit* t = NULL;
  9231. #else
  9232. sp_int_digit t[24];
  9233. #endif
  9234. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  9235. t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * 24, NULL,
  9236. DYNAMIC_TYPE_BIGINT);
  9237. if (t == NULL) {
  9238. err = MP_MEM;
  9239. }
  9240. #endif
  9241. if (err == MP_OKAY) {
  9242. SP_ASM_MUL(h, l, a->dp[0], b->dp[0]);
  9243. t[0] = h;
  9244. h = 0;
  9245. SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[1]);
  9246. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[0]);
  9247. t[1] = l;
  9248. l = h;
  9249. h = o;
  9250. o = 0;
  9251. SP_ASM_MUL_ADD_NO(l, h, a->dp[0], b->dp[2]);
  9252. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[1]);
  9253. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[0]);
  9254. t[2] = l;
  9255. l = h;
  9256. h = o;
  9257. o = 0;
  9258. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[3]);
  9259. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[2]);
  9260. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[1]);
  9261. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[0]);
  9262. t[3] = l;
  9263. l = h;
  9264. h = o;
  9265. o = 0;
  9266. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[4]);
  9267. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[3]);
  9268. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[2]);
  9269. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[1]);
  9270. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[0]);
  9271. t[4] = l;
  9272. l = h;
  9273. h = o;
  9274. o = 0;
  9275. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[5]);
  9276. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[4]);
  9277. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[3]);
  9278. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[2]);
  9279. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[1]);
  9280. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[0]);
  9281. t[5] = l;
  9282. l = h;
  9283. h = o;
  9284. o = 0;
  9285. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[6]);
  9286. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[5]);
  9287. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[4]);
  9288. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[3]);
  9289. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[2]);
  9290. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[1]);
  9291. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[0]);
  9292. t[6] = l;
  9293. l = h;
  9294. h = o;
  9295. o = 0;
  9296. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[7]);
  9297. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[6]);
  9298. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[5]);
  9299. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[4]);
  9300. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[3]);
  9301. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[2]);
  9302. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[1]);
  9303. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[0]);
  9304. t[7] = l;
  9305. l = h;
  9306. h = o;
  9307. o = 0;
  9308. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[8]);
  9309. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[7]);
  9310. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[6]);
  9311. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[5]);
  9312. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[4]);
  9313. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[3]);
  9314. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[2]);
  9315. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[1]);
  9316. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[0]);
  9317. t[8] = l;
  9318. l = h;
  9319. h = o;
  9320. o = 0;
  9321. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[9]);
  9322. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[8]);
  9323. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[7]);
  9324. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[6]);
  9325. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[5]);
  9326. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[4]);
  9327. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[3]);
  9328. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[2]);
  9329. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[1]);
  9330. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[0]);
  9331. t[9] = l;
  9332. l = h;
  9333. h = o;
  9334. o = 0;
  9335. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[10]);
  9336. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[9]);
  9337. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[8]);
  9338. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[7]);
  9339. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[6]);
  9340. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[5]);
  9341. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[4]);
  9342. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[3]);
  9343. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[2]);
  9344. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[1]);
  9345. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[0]);
  9346. t[10] = l;
  9347. l = h;
  9348. h = o;
  9349. o = 0;
  9350. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[11]);
  9351. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[10]);
  9352. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[9]);
  9353. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[8]);
  9354. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[7]);
  9355. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[6]);
  9356. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[5]);
  9357. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[4]);
  9358. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[3]);
  9359. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[2]);
  9360. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[1]);
  9361. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[0]);
  9362. t[11] = l;
  9363. l = h;
  9364. h = o;
  9365. o = 0;
  9366. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[12]);
  9367. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[11]);
  9368. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[10]);
  9369. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[9]);
  9370. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[8]);
  9371. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[7]);
  9372. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[6]);
  9373. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[5]);
  9374. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[4]);
  9375. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[3]);
  9376. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[2]);
  9377. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[1]);
  9378. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[0]);
  9379. t[12] = l;
  9380. l = h;
  9381. h = o;
  9382. o = 0;
  9383. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[13]);
  9384. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[12]);
  9385. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[11]);
  9386. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[10]);
  9387. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[9]);
  9388. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[8]);
  9389. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[7]);
  9390. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[6]);
  9391. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[5]);
  9392. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[4]);
  9393. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[3]);
  9394. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[2]);
  9395. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[1]);
  9396. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[0]);
  9397. t[13] = l;
  9398. l = h;
  9399. h = o;
  9400. o = 0;
  9401. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[14]);
  9402. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[13]);
  9403. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[12]);
  9404. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[11]);
  9405. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[10]);
  9406. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[9]);
  9407. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[8]);
  9408. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[7]);
  9409. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[6]);
  9410. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[5]);
  9411. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[4]);
  9412. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[3]);
  9413. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[2]);
  9414. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[1]);
  9415. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[0]);
  9416. t[14] = l;
  9417. l = h;
  9418. h = o;
  9419. o = 0;
  9420. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[15]);
  9421. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[14]);
  9422. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[13]);
  9423. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[12]);
  9424. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[11]);
  9425. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[10]);
  9426. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[9]);
  9427. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[8]);
  9428. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[7]);
  9429. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[6]);
  9430. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[5]);
  9431. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[4]);
  9432. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[3]);
  9433. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[2]);
  9434. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[1]);
  9435. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[0]);
  9436. t[15] = l;
  9437. l = h;
  9438. h = o;
  9439. o = 0;
  9440. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[16]);
  9441. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[15]);
  9442. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[14]);
  9443. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[13]);
  9444. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[12]);
  9445. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[11]);
  9446. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[10]);
  9447. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[9]);
  9448. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[8]);
  9449. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[7]);
  9450. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[6]);
  9451. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[5]);
  9452. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[4]);
  9453. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[3]);
  9454. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[2]);
  9455. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[1]);
  9456. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[0]);
  9457. t[16] = l;
  9458. l = h;
  9459. h = o;
  9460. o = 0;
  9461. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[17]);
  9462. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[16]);
  9463. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[15]);
  9464. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[14]);
  9465. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[13]);
  9466. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[12]);
  9467. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[11]);
  9468. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[10]);
  9469. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[9]);
  9470. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[8]);
  9471. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[7]);
  9472. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[6]);
  9473. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[5]);
  9474. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[4]);
  9475. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[3]);
  9476. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[2]);
  9477. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[1]);
  9478. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[0]);
  9479. t[17] = l;
  9480. l = h;
  9481. h = o;
  9482. o = 0;
  9483. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[18]);
  9484. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[17]);
  9485. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[16]);
  9486. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[15]);
  9487. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[14]);
  9488. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[13]);
  9489. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[12]);
  9490. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[11]);
  9491. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[10]);
  9492. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[9]);
  9493. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[8]);
  9494. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[7]);
  9495. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[6]);
  9496. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[5]);
  9497. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[4]);
  9498. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[3]);
  9499. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[2]);
  9500. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[1]);
  9501. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[0]);
  9502. t[18] = l;
  9503. l = h;
  9504. h = o;
  9505. o = 0;
  9506. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[19]);
  9507. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[18]);
  9508. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[17]);
  9509. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[16]);
  9510. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[15]);
  9511. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[14]);
  9512. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[13]);
  9513. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[12]);
  9514. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[11]);
  9515. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[10]);
  9516. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[9]);
  9517. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[8]);
  9518. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[7]);
  9519. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[6]);
  9520. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[5]);
  9521. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[4]);
  9522. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[3]);
  9523. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[2]);
  9524. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[1]);
  9525. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[0]);
  9526. t[19] = l;
  9527. l = h;
  9528. h = o;
  9529. o = 0;
  9530. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[20]);
  9531. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[19]);
  9532. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[18]);
  9533. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[17]);
  9534. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[16]);
  9535. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[15]);
  9536. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[14]);
  9537. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[13]);
  9538. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[12]);
  9539. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[11]);
  9540. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[10]);
  9541. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[9]);
  9542. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[8]);
  9543. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[7]);
  9544. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[6]);
  9545. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[5]);
  9546. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[4]);
  9547. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[3]);
  9548. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[2]);
  9549. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[1]);
  9550. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[0]);
  9551. t[20] = l;
  9552. l = h;
  9553. h = o;
  9554. o = 0;
  9555. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[21]);
  9556. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[20]);
  9557. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[19]);
  9558. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[18]);
  9559. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[17]);
  9560. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[16]);
  9561. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[15]);
  9562. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[14]);
  9563. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[13]);
  9564. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[12]);
  9565. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[11]);
  9566. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[10]);
  9567. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[9]);
  9568. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[8]);
  9569. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[7]);
  9570. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[6]);
  9571. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[5]);
  9572. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[4]);
  9573. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[3]);
  9574. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[2]);
  9575. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[1]);
  9576. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[0]);
  9577. t[21] = l;
  9578. l = h;
  9579. h = o;
  9580. o = 0;
  9581. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[22]);
  9582. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[21]);
  9583. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[20]);
  9584. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[19]);
  9585. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[18]);
  9586. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[17]);
  9587. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[16]);
  9588. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[15]);
  9589. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[14]);
  9590. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[13]);
  9591. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[12]);
  9592. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[11]);
  9593. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[10]);
  9594. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[9]);
  9595. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[8]);
  9596. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[7]);
  9597. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[6]);
  9598. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[5]);
  9599. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[4]);
  9600. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[3]);
  9601. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[2]);
  9602. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[1]);
  9603. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[0]);
  9604. t[22] = l;
  9605. l = h;
  9606. h = o;
  9607. o = 0;
  9608. SP_ASM_MUL_ADD(l, h, o, a->dp[0], b->dp[23]);
  9609. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[22]);
  9610. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[21]);
  9611. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[20]);
  9612. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[19]);
  9613. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[18]);
  9614. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[17]);
  9615. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[16]);
  9616. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[15]);
  9617. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[14]);
  9618. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[13]);
  9619. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[12]);
  9620. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[11]);
  9621. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[10]);
  9622. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[9]);
  9623. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[8]);
  9624. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[7]);
  9625. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[6]);
  9626. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[5]);
  9627. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[4]);
  9628. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[3]);
  9629. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[2]);
  9630. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[1]);
  9631. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[0]);
  9632. t[23] = l;
  9633. l = h;
  9634. h = o;
  9635. o = 0;
  9636. SP_ASM_MUL_ADD(l, h, o, a->dp[1], b->dp[23]);
  9637. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[22]);
  9638. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[21]);
  9639. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[20]);
  9640. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[19]);
  9641. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[18]);
  9642. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[17]);
  9643. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[16]);
  9644. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[15]);
  9645. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[14]);
  9646. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[13]);
  9647. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[12]);
  9648. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[11]);
  9649. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[10]);
  9650. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[9]);
  9651. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[8]);
  9652. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[7]);
  9653. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[6]);
  9654. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[5]);
  9655. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[4]);
  9656. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[3]);
  9657. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[2]);
  9658. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[1]);
  9659. r->dp[24] = l;
  9660. l = h;
  9661. h = o;
  9662. o = 0;
  9663. SP_ASM_MUL_ADD(l, h, o, a->dp[2], b->dp[23]);
  9664. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[22]);
  9665. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[21]);
  9666. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[20]);
  9667. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[19]);
  9668. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[18]);
  9669. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[17]);
  9670. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[16]);
  9671. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[15]);
  9672. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[14]);
  9673. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[13]);
  9674. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[12]);
  9675. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[11]);
  9676. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[10]);
  9677. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[9]);
  9678. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[8]);
  9679. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[7]);
  9680. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[6]);
  9681. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[5]);
  9682. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[4]);
  9683. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[3]);
  9684. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[2]);
  9685. r->dp[25] = l;
  9686. l = h;
  9687. h = o;
  9688. o = 0;
  9689. SP_ASM_MUL_ADD(l, h, o, a->dp[3], b->dp[23]);
  9690. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[22]);
  9691. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[21]);
  9692. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[20]);
  9693. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[19]);
  9694. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[18]);
  9695. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[17]);
  9696. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[16]);
  9697. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[15]);
  9698. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[14]);
  9699. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[13]);
  9700. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[12]);
  9701. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[11]);
  9702. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[10]);
  9703. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[9]);
  9704. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[8]);
  9705. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[7]);
  9706. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[6]);
  9707. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[5]);
  9708. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[4]);
  9709. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[3]);
  9710. r->dp[26] = l;
  9711. l = h;
  9712. h = o;
  9713. o = 0;
  9714. SP_ASM_MUL_ADD(l, h, o, a->dp[4], b->dp[23]);
  9715. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[22]);
  9716. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[21]);
  9717. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[20]);
  9718. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[19]);
  9719. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[18]);
  9720. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[17]);
  9721. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[16]);
  9722. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[15]);
  9723. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[14]);
  9724. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[13]);
  9725. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[12]);
  9726. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[11]);
  9727. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[10]);
  9728. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[9]);
  9729. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[8]);
  9730. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[7]);
  9731. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[6]);
  9732. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[5]);
  9733. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[4]);
  9734. r->dp[27] = l;
  9735. l = h;
  9736. h = o;
  9737. o = 0;
  9738. SP_ASM_MUL_ADD(l, h, o, a->dp[5], b->dp[23]);
  9739. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[22]);
  9740. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[21]);
  9741. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[20]);
  9742. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[19]);
  9743. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[18]);
  9744. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[17]);
  9745. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[16]);
  9746. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[15]);
  9747. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[14]);
  9748. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[13]);
  9749. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[12]);
  9750. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[11]);
  9751. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[10]);
  9752. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[9]);
  9753. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[8]);
  9754. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[7]);
  9755. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[6]);
  9756. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[5]);
  9757. r->dp[28] = l;
  9758. l = h;
  9759. h = o;
  9760. o = 0;
  9761. SP_ASM_MUL_ADD(l, h, o, a->dp[6], b->dp[23]);
  9762. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[22]);
  9763. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[21]);
  9764. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[20]);
  9765. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[19]);
  9766. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[18]);
  9767. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[17]);
  9768. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[16]);
  9769. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[15]);
  9770. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[14]);
  9771. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[13]);
  9772. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[12]);
  9773. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[11]);
  9774. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[10]);
  9775. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[9]);
  9776. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[8]);
  9777. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[7]);
  9778. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[6]);
  9779. r->dp[29] = l;
  9780. l = h;
  9781. h = o;
  9782. o = 0;
  9783. SP_ASM_MUL_ADD(l, h, o, a->dp[7], b->dp[23]);
  9784. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[22]);
  9785. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[21]);
  9786. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[20]);
  9787. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[19]);
  9788. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[18]);
  9789. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[17]);
  9790. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[16]);
  9791. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[15]);
  9792. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[14]);
  9793. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[13]);
  9794. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[12]);
  9795. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[11]);
  9796. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[10]);
  9797. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[9]);
  9798. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[8]);
  9799. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[7]);
  9800. r->dp[30] = l;
  9801. l = h;
  9802. h = o;
  9803. o = 0;
  9804. SP_ASM_MUL_ADD(l, h, o, a->dp[8], b->dp[23]);
  9805. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[22]);
  9806. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[21]);
  9807. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[20]);
  9808. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[19]);
  9809. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[18]);
  9810. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[17]);
  9811. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[16]);
  9812. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[15]);
  9813. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[14]);
  9814. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[13]);
  9815. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[12]);
  9816. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[11]);
  9817. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[10]);
  9818. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[9]);
  9819. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[8]);
  9820. r->dp[31] = l;
  9821. l = h;
  9822. h = o;
  9823. o = 0;
  9824. SP_ASM_MUL_ADD(l, h, o, a->dp[9], b->dp[23]);
  9825. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[22]);
  9826. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[21]);
  9827. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[20]);
  9828. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[19]);
  9829. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[18]);
  9830. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[17]);
  9831. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[16]);
  9832. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[15]);
  9833. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[14]);
  9834. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[13]);
  9835. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[12]);
  9836. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[11]);
  9837. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[10]);
  9838. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[9]);
  9839. r->dp[32] = l;
  9840. l = h;
  9841. h = o;
  9842. o = 0;
  9843. SP_ASM_MUL_ADD(l, h, o, a->dp[10], b->dp[23]);
  9844. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[22]);
  9845. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[21]);
  9846. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[20]);
  9847. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[19]);
  9848. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[18]);
  9849. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[17]);
  9850. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[16]);
  9851. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[15]);
  9852. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[14]);
  9853. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[13]);
  9854. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[12]);
  9855. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[11]);
  9856. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[10]);
  9857. r->dp[33] = l;
  9858. l = h;
  9859. h = o;
  9860. o = 0;
  9861. SP_ASM_MUL_ADD(l, h, o, a->dp[11], b->dp[23]);
  9862. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[22]);
  9863. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[21]);
  9864. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[20]);
  9865. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[19]);
  9866. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[18]);
  9867. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[17]);
  9868. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[16]);
  9869. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[15]);
  9870. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[14]);
  9871. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[13]);
  9872. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[12]);
  9873. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[11]);
  9874. r->dp[34] = l;
  9875. l = h;
  9876. h = o;
  9877. o = 0;
  9878. SP_ASM_MUL_ADD(l, h, o, a->dp[12], b->dp[23]);
  9879. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[22]);
  9880. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[21]);
  9881. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[20]);
  9882. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[19]);
  9883. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[18]);
  9884. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[17]);
  9885. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[16]);
  9886. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[15]);
  9887. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[14]);
  9888. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[13]);
  9889. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[12]);
  9890. r->dp[35] = l;
  9891. l = h;
  9892. h = o;
  9893. o = 0;
  9894. SP_ASM_MUL_ADD(l, h, o, a->dp[13], b->dp[23]);
  9895. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[22]);
  9896. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[21]);
  9897. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[20]);
  9898. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[19]);
  9899. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[18]);
  9900. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[17]);
  9901. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[16]);
  9902. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[15]);
  9903. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[14]);
  9904. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[13]);
  9905. r->dp[36] = l;
  9906. l = h;
  9907. h = o;
  9908. o = 0;
  9909. SP_ASM_MUL_ADD(l, h, o, a->dp[14], b->dp[23]);
  9910. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[22]);
  9911. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[21]);
  9912. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[20]);
  9913. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[19]);
  9914. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[18]);
  9915. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[17]);
  9916. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[16]);
  9917. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[15]);
  9918. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[14]);
  9919. r->dp[37] = l;
  9920. l = h;
  9921. h = o;
  9922. o = 0;
  9923. SP_ASM_MUL_ADD(l, h, o, a->dp[15], b->dp[23]);
  9924. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[22]);
  9925. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[21]);
  9926. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[20]);
  9927. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[19]);
  9928. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[18]);
  9929. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[17]);
  9930. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[16]);
  9931. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[15]);
  9932. r->dp[38] = l;
  9933. l = h;
  9934. h = o;
  9935. o = 0;
  9936. SP_ASM_MUL_ADD(l, h, o, a->dp[16], b->dp[23]);
  9937. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[22]);
  9938. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[21]);
  9939. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[20]);
  9940. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[19]);
  9941. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[18]);
  9942. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[17]);
  9943. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[16]);
  9944. r->dp[39] = l;
  9945. l = h;
  9946. h = o;
  9947. o = 0;
  9948. SP_ASM_MUL_ADD(l, h, o, a->dp[17], b->dp[23]);
  9949. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[22]);
  9950. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[21]);
  9951. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[20]);
  9952. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[19]);
  9953. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[18]);
  9954. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[17]);
  9955. r->dp[40] = l;
  9956. l = h;
  9957. h = o;
  9958. o = 0;
  9959. SP_ASM_MUL_ADD(l, h, o, a->dp[18], b->dp[23]);
  9960. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[22]);
  9961. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[21]);
  9962. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[20]);
  9963. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[19]);
  9964. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[18]);
  9965. r->dp[41] = l;
  9966. l = h;
  9967. h = o;
  9968. o = 0;
  9969. SP_ASM_MUL_ADD(l, h, o, a->dp[19], b->dp[23]);
  9970. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[22]);
  9971. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[21]);
  9972. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[20]);
  9973. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[19]);
  9974. r->dp[42] = l;
  9975. l = h;
  9976. h = o;
  9977. o = 0;
  9978. SP_ASM_MUL_ADD(l, h, o, a->dp[20], b->dp[23]);
  9979. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[22]);
  9980. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[21]);
  9981. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[20]);
  9982. r->dp[43] = l;
  9983. l = h;
  9984. h = o;
  9985. o = 0;
  9986. SP_ASM_MUL_ADD(l, h, o, a->dp[21], b->dp[23]);
  9987. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[22]);
  9988. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[21]);
  9989. r->dp[44] = l;
  9990. l = h;
  9991. h = o;
  9992. o = 0;
  9993. SP_ASM_MUL_ADD(l, h, o, a->dp[22], b->dp[23]);
  9994. SP_ASM_MUL_ADD(l, h, o, a->dp[23], b->dp[22]);
  9995. r->dp[45] = l;
  9996. l = h;
  9997. h = o;
  9998. SP_ASM_MUL_ADD_NO(l, h, a->dp[23], b->dp[23]);
  9999. r->dp[46] = l;
  10000. r->dp[47] = h;
  10001. XMEMCPY(r->dp, t, 24 * sizeof(sp_int_digit));
  10002. r->used = 48;
  10003. sp_clamp(r);
  10004. }
  10005. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  10006. if (t != NULL) {
  10007. XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
  10008. }
  10009. #endif
  10010. return err;
  10011. }
  10012. #endif /* SP_INT_DIGITS >= 48 */
  10013. #if SP_INT_DIGITS >= 64
  10014. /* Multiply a by b and store in r: r = a * b
  10015. *
  10016. * Karatsuba implementation.
  10017. *
  10018. * @param [in] a SP integer to multiply.
  10019. * @param [in] b SP integer to multiply.
  10020. * @param [out] r SP integer result.
  10021. *
  10022. * @return MP_OKAY on success.
  10023. * @return MP_MEM when dynamic memory allocation fails.
  10024. */
  10025. static int _sp_mul_32(const sp_int* a, const sp_int* b, sp_int* r)
  10026. {
  10027. int err = MP_OKAY;
  10028. int i;
  10029. sp_int_digit l;
  10030. sp_int_digit h;
  10031. sp_int* a1;
  10032. sp_int* b1;
  10033. sp_int* z0;
  10034. sp_int* z1;
  10035. sp_int* z2;
  10036. sp_int_digit ca;
  10037. sp_int_digit cb;
  10038. DECL_SP_INT_ARRAY(t, 16, 2);
  10039. DECL_SP_INT_ARRAY(z, 33, 2);
  10040. ALLOC_SP_INT_ARRAY(t, 16, 2, err, NULL);
  10041. ALLOC_SP_INT_ARRAY(z, 33, 2, err, NULL);
  10042. if (err == MP_OKAY) {
  10043. a1 = t[0];
  10044. b1 = t[1];
  10045. z1 = z[0];
  10046. z2 = z[1];
  10047. z0 = r;
  10048. XMEMCPY(a1->dp, &a->dp[16], sizeof(sp_int_digit) * 16);
  10049. a1->used = 16;
  10050. XMEMCPY(b1->dp, &b->dp[16], sizeof(sp_int_digit) * 16);
  10051. b1->used = 16;
  10052. /* z2 = a1 * b1 */
  10053. err = _sp_mul_16(a1, b1, z2);
  10054. }
  10055. if (err == MP_OKAY) {
  10056. l = a1->dp[0];
  10057. h = 0;
  10058. SP_ASM_ADDC(l, h, a->dp[0]);
  10059. a1->dp[0] = l;
  10060. l = h;
  10061. h = 0;
  10062. for (i = 1; i < 16; i++) {
  10063. SP_ASM_ADDC(l, h, a1->dp[i]);
  10064. SP_ASM_ADDC(l, h, a->dp[i]);
  10065. a1->dp[i] = l;
  10066. l = h;
  10067. h = 0;
  10068. }
  10069. ca = l;
  10070. /* b01 = b0 + b1 */
  10071. l = b1->dp[0];
  10072. h = 0;
  10073. SP_ASM_ADDC(l, h, b->dp[0]);
  10074. b1->dp[0] = l;
  10075. l = h;
  10076. h = 0;
  10077. for (i = 1; i < 16; i++) {
  10078. SP_ASM_ADDC(l, h, b1->dp[i]);
  10079. SP_ASM_ADDC(l, h, b->dp[i]);
  10080. b1->dp[i] = l;
  10081. l = h;
  10082. h = 0;
  10083. }
  10084. cb = l;
  10085. /* z0 = a0 * b0 */
  10086. err = _sp_mul_16(a, b, z0);
  10087. }
  10088. if (err == MP_OKAY) {
  10089. /* z1 = (a0 + a1) * (b0 + b1) */
  10090. err = _sp_mul_16(a1, b1, z1);
  10091. }
  10092. if (err == MP_OKAY) {
  10093. /* r = (z2 << 32) + (z1 - z0 - z2) << 16) + z0 */
  10094. /* r = z0 */
  10095. /* r += (z1 - z0 - z2) << 16 */
  10096. z1->dp[32] = ca & cb;
  10097. l = 0;
  10098. if (ca) {
  10099. h = 0;
  10100. for (i = 0; i < 16; i++) {
  10101. SP_ASM_ADDC(l, h, z1->dp[i + 16]);
  10102. SP_ASM_ADDC(l, h, b1->dp[i]);
  10103. z1->dp[i + 16] = l;
  10104. l = h;
  10105. h = 0;
  10106. }
  10107. }
  10108. z1->dp[32] += l;
  10109. l = 0;
  10110. if (cb) {
  10111. h = 0;
  10112. for (i = 0; i < 16; i++) {
  10113. SP_ASM_ADDC(l, h, z1->dp[i + 16]);
  10114. SP_ASM_ADDC(l, h, a1->dp[i]);
  10115. z1->dp[i + 16] = l;
  10116. l = h;
  10117. h = 0;
  10118. }
  10119. }
  10120. z1->dp[32] += l;
  10121. /* z1 = z1 - z0 - z1 */
  10122. l = 0;
  10123. h = 0;
  10124. for (i = 0; i < 32; i++) {
  10125. l += z1->dp[i];
  10126. SP_ASM_SUBB(l, h, z0->dp[i]);
  10127. SP_ASM_SUBB(l, h, z2->dp[i]);
  10128. z1->dp[i] = l;
  10129. l = h;
  10130. h = 0;
  10131. }
  10132. z1->dp[i] += l;
  10133. /* r += z1 << 16 */
  10134. l = 0;
  10135. h = 0;
  10136. for (i = 0; i < 16; i++) {
  10137. SP_ASM_ADDC(l, h, r->dp[i + 16]);
  10138. SP_ASM_ADDC(l, h, z1->dp[i]);
  10139. r->dp[i + 16] = l;
  10140. l = h;
  10141. h = 0;
  10142. }
  10143. for (; i < 33; i++) {
  10144. SP_ASM_ADDC(l, h, z1->dp[i]);
  10145. r->dp[i + 16] = l;
  10146. l = h;
  10147. h = 0;
  10148. }
  10149. /* r += z2 << 32 */
  10150. l = 0;
  10151. h = 0;
  10152. for (i = 0; i < 17; i++) {
  10153. SP_ASM_ADDC(l, h, r->dp[i + 32]);
  10154. SP_ASM_ADDC(l, h, z2->dp[i]);
  10155. r->dp[i + 32] = l;
  10156. l = h;
  10157. h = 0;
  10158. }
  10159. for (; i < 32; i++) {
  10160. SP_ASM_ADDC(l, h, z2->dp[i]);
  10161. r->dp[i + 32] = l;
  10162. l = h;
  10163. h = 0;
  10164. }
  10165. r->used = 64;
  10166. sp_clamp(r);
  10167. }
  10168. FREE_SP_INT_ARRAY(z, NULL);
  10169. FREE_SP_INT_ARRAY(t, NULL);
  10170. return err;
  10171. }
  10172. #endif /* SP_INT_DIGITS >= 64 */
  10173. #if SP_INT_DIGITS >= 96
/* Multiply a by b and store in r: r = a * b
 *
 * Karatsuba implementation: each operand is split into a 24-digit low half
 * (a0, b0) and a 24-digit high half (a1, b1) and the full product is built
 * from three half-size multiplications:
 *     r = (z2 << 48 digits) + ((z1 - z0 - z2) << 24 digits) + z0
 * where z0 = a0 * b0, z2 = a1 * b1 and z1 = (a0 + a1) * (b0 + b1).
 *
 * @param  [in]   a  SP integer to multiply.
 * @param  [in]   b  SP integer to multiply.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul_48(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;   /* low word of the running column sum */
    sp_int_digit h;   /* carry word of the running column sum */
    sp_int* a1;       /* high half of a; overwritten with a0 + a1 */
    sp_int* b1;       /* high half of b; overwritten with b0 + b1 */
    sp_int* z0;       /* low partial product a0 * b0 (aliases r) */
    sp_int* z1;       /* middle partial product (a0 + a1) * (b0 + b1) */
    sp_int* z2;       /* high partial product a1 * b1 */
    sp_int_digit ca;  /* carry out of the a0 + a1 addition */
    sp_int_digit cb;  /* carry out of the b0 + b1 addition */
    DECL_SP_INT_ARRAY(t, 24, 2);
    DECL_SP_INT_ARRAY(z, 49, 2);

    ALLOC_SP_INT_ARRAY(t, 24, 2, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 49, 2, err, NULL);
    if (err == MP_OKAY) {
        a1 = t[0];
        b1 = t[1];
        z1 = z[0];
        z2 = z[1];
        /* z0 is written directly into the result to save an allocation. */
        z0 = r;

        /* Copy out the high 24 digits of each operand. */
        XMEMCPY(a1->dp, &a->dp[24], sizeof(sp_int_digit) * 24);
        a1->used = 24;
        XMEMCPY(b1->dp, &b->dp[24], sizeof(sp_int_digit) * 24);
        b1->used = 24;
        /* z2 = a1 * b1 */
        err = _sp_mul_24(a1, b1, z2);
    }
    if (err == MP_OKAY) {
        /* a01 = a0 + a1, computed in place over a1; final carry kept in ca. */
        l = a1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, a->dp[0]);
        a1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 24; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* b01 = b0 + b1, computed in place over b1; final carry kept in cb. */
        l = b1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, b->dp[0]);
        b1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 24; i++) {
            SP_ASM_ADDC(l, h, b1->dp[i]);
            SP_ASM_ADDC(l, h, b->dp[i]);
            b1->dp[i] = l;
            l = h;
            h = 0;
        }
        cb = l;
        /* z0 = a0 * b0 */
        err = _sp_mul_24(a, b, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) * (b0 + b1) */
        err = _sp_mul_24(a1, b1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 48) + ((z1 - z0 - z2) << 24) + z0 */
        /* r already holds z0; add the shifted terms into it. */
        /* First fix up z1: the 24x24 multiply ignored the carries ca and cb,
         * so when ca is set add b01 into the high half of z1 (and likewise
         * a01 when cb is set); digit 48 collects ca*cb plus the carries.
         */
        z1->dp[48] = ca & cb;
        l = 0;
        if (ca) {
            h = 0;
            for (i = 0; i < 24; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 24]);
                SP_ASM_ADDC(l, h, b1->dp[i]);
                z1->dp[i + 24] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[48] += l;
        l = 0;
        if (cb) {
            h = 0;
            for (i = 0; i < 24; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 24]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 24] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[48] += l;
        /* z1 = z1 - z0 - z2 */
        l = 0;
        h = 0;
        for (i = 0; i < 48; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 24 */
        l = 0;
        h = 0;
        for (i = 0; i < 24; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 24]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 24] = l;
            l = h;
            h = 0;
        }
        /* Digits 24..48 of z1 land above z0's 48 digits: no existing r word
         * to add, only carry propagation.
         */
        for (; i < 49; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 24] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 48 */
        l = 0;
        h = 0;
        /* The first 25 digits overlap the z1 contribution just written. */
        for (i = 0; i < 25; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 48]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        for (; i < 48; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        r->used = 96;
        sp_clamp(r);
    }

    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
  10332. #endif /* SP_INT_DIGITS >= 96 */
  10333. #if SP_INT_DIGITS >= 128
/* Multiply a by b and store in r: r = a * b
 *
 * Karatsuba implementation: each operand is split into a 32-digit low half
 * (a0, b0) and a 32-digit high half (a1, b1) and the full product is built
 * from three half-size multiplications:
 *     r = (z2 << 64 digits) + ((z1 - z0 - z2) << 32 digits) + z0
 * where z0 = a0 * b0, z2 = a1 * b1 and z1 = (a0 + a1) * (b0 + b1).
 *
 * @param  [in]   a  SP integer to multiply.
 * @param  [in]   b  SP integer to multiply.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul_64(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;   /* low word of the running column sum */
    sp_int_digit h;   /* carry word of the running column sum */
    sp_int* a1;       /* high half of a; overwritten with a0 + a1 */
    sp_int* b1;       /* high half of b; overwritten with b0 + b1 */
    sp_int* z0;       /* low partial product a0 * b0 (aliases r) */
    sp_int* z1;       /* middle partial product (a0 + a1) * (b0 + b1) */
    sp_int* z2;       /* high partial product a1 * b1 */
    sp_int_digit ca;  /* carry out of the a0 + a1 addition */
    sp_int_digit cb;  /* carry out of the b0 + b1 addition */
    DECL_SP_INT_ARRAY(t, 32, 2);
    DECL_SP_INT_ARRAY(z, 65, 2);

    ALLOC_SP_INT_ARRAY(t, 32, 2, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 65, 2, err, NULL);
    if (err == MP_OKAY) {
        a1 = t[0];
        b1 = t[1];
        z1 = z[0];
        z2 = z[1];
        /* z0 is written directly into the result to save an allocation. */
        z0 = r;

        /* Copy out the high 32 digits of each operand. */
        XMEMCPY(a1->dp, &a->dp[32], sizeof(sp_int_digit) * 32);
        a1->used = 32;
        XMEMCPY(b1->dp, &b->dp[32], sizeof(sp_int_digit) * 32);
        b1->used = 32;
        /* z2 = a1 * b1 */
        err = _sp_mul_32(a1, b1, z2);
    }
    if (err == MP_OKAY) {
        /* a01 = a0 + a1, computed in place over a1; final carry kept in ca. */
        l = a1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, a->dp[0]);
        a1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 32; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* b01 = b0 + b1, computed in place over b1; final carry kept in cb. */
        l = b1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, b->dp[0]);
        b1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 32; i++) {
            SP_ASM_ADDC(l, h, b1->dp[i]);
            SP_ASM_ADDC(l, h, b->dp[i]);
            b1->dp[i] = l;
            l = h;
            h = 0;
        }
        cb = l;
        /* z0 = a0 * b0 */
        err = _sp_mul_32(a, b, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) * (b0 + b1) */
        err = _sp_mul_32(a1, b1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 64) + ((z1 - z0 - z2) << 32) + z0 */
        /* r already holds z0; add the shifted terms into it. */
        /* First fix up z1: the 32x32 multiply ignored the carries ca and cb,
         * so when ca is set add b01 into the high half of z1 (and likewise
         * a01 when cb is set); digit 64 collects ca*cb plus the carries.
         */
        z1->dp[64] = ca & cb;
        l = 0;
        if (ca) {
            h = 0;
            for (i = 0; i < 32; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 32]);
                SP_ASM_ADDC(l, h, b1->dp[i]);
                z1->dp[i + 32] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[64] += l;
        l = 0;
        if (cb) {
            h = 0;
            for (i = 0; i < 32; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 32]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 32] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[64] += l;
        /* z1 = z1 - z0 - z2 */
        l = 0;
        h = 0;
        for (i = 0; i < 64; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 32 */
        l = 0;
        h = 0;
        for (i = 0; i < 32; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 32]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        /* Digits 32..64 of z1 land above z0's 64 digits: no existing r word
         * to add, only carry propagation.
         */
        for (; i < 65; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 64 */
        l = 0;
        h = 0;
        /* The first 33 digits overlap the z1 contribution just written. */
        for (i = 0; i < 33; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 64]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 64] = l;
            l = h;
            h = 0;
        }
        for (; i < 64; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 64] = l;
            l = h;
            h = 0;
        }
        r->used = 128;
        sp_clamp(r);
    }

    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
  10492. #endif /* SP_INT_DIGITS >= 128 */
#if SP_INT_DIGITS >= 192
/* Multiply a by b and store in r: r = a * b
 *
 * Karatsuba implementation.
 *
 * Splits each 96-digit operand into a low (a0, b0) and a high (a1, b1) half
 * of 48 digits each, and performs three 48x48 multiplications instead of one
 * 96x96:
 *     a * b = (z2 << 96) + ((z1 - z0 - z2) << 48) + z0
 * where z0 = a0*b0, z2 = a1*b1 and z1 = (a0 + a1)*(b0 + b1).
 *
 * @param  [in]   a  SP integer to multiply.
 * @param  [in]   b  SP integer to multiply.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_mul_96(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    /* l/h act as a two-digit accumulator for the carrying add/sub chains. */
    sp_int_digit l;
    sp_int_digit h;
    sp_int* a1;
    sp_int* b1;
    sp_int* z0;
    sp_int* z1;
    sp_int* z2;
    /* Carries out of the half-sums a0 + a1 and b0 + b1 respectively. */
    sp_int_digit ca;
    sp_int_digit cb;
    DECL_SP_INT_ARRAY(t, 48, 2);
    DECL_SP_INT_ARRAY(z, 97, 2);

    ALLOC_SP_INT_ARRAY(t, 48, 2, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 97, 2, err, NULL);
    if (err == MP_OKAY) {
        a1 = t[0];
        b1 = t[1];
        z1 = z[0];
        z2 = z[1];
        /* z0 is accumulated directly into the result. */
        z0 = r;

        /* Copy the high 48 digits of each operand into working SP ints. */
        XMEMCPY(a1->dp, &a->dp[48], sizeof(sp_int_digit) * 48);
        a1->used = 48;
        XMEMCPY(b1->dp, &b->dp[48], sizeof(sp_int_digit) * 48);
        b1->used = 48;

        /* z2 = a1 * b1 */
        err = _sp_mul_48(a1, b1, z2);
    }
    if (err == MP_OKAY) {
        /* a01 = a0 + a1, computed in place in a1; carry out kept in ca. */
        l = a1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, a->dp[0]);
        a1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 48; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* b01 = b0 + b1, computed in place in b1; carry out kept in cb. */
        l = b1->dp[0];
        h = 0;
        SP_ASM_ADDC(l, h, b->dp[0]);
        b1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 48; i++) {
            SP_ASM_ADDC(l, h, b1->dp[i]);
            SP_ASM_ADDC(l, h, b->dp[i]);
            b1->dp[i] = l;
            l = h;
            h = 0;
        }
        cb = l;

        /* z0 = a0 * b0 */
        err = _sp_mul_48(a, b, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) * (b0 + b1) */
        err = _sp_mul_48(a1, b1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 96) + ((z1 - z0 - z2) << 48) + z0 */
        /* r = z0 - already in place. */
        /* The 48x48 multiply above ignored the carry bits out of the two
         * half-sums; add the cross terms those carries contribute to z1.
         * Top digit is the product of the two carry bits.
         */
        z1->dp[96] = ca & cb;
        l = 0;
        if (ca) {
            /* z1 += b01 << 48 */
            h = 0;
            for (i = 0; i < 48; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 48]);
                SP_ASM_ADDC(l, h, b1->dp[i]);
                z1->dp[i + 48] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[96] += l;
        l = 0;
        if (cb) {
            /* z1 += a01 << 48 */
            h = 0;
            for (i = 0; i < 48; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 48]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 48] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[96] += l;
        /* z1 = z1 - z0 - z2 */
        l = 0;
        h = 0;
        for (i = 0; i < 96; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 48 */
        l = 0;
        h = 0;
        for (i = 0; i < 48; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 48]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        /* Digits of r above index 95 have not been written yet - just
         * propagate z1 plus carry into them.
         */
        for (; i < 97; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 96 */
        l = 0;
        h = 0;
        for (i = 0; i < 49; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 96]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 96] = l;
            l = h;
            h = 0;
        }
        /* Remaining digits of r are untouched - just z2 plus carry. */
        for (; i < 96; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 96] = l;
            l = h;
            h = 0;
        }
        r->used = 192;
        sp_clamp(r);
    }

    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
#endif /* SP_INT_DIGITS >= 192 */
  10653. #endif /* SQR_MUL_ASM && WOLFSSL_SP_INT_LARGE_COMBA */
  10654. #endif /* !WOLFSSL_SP_SMALL */
/* Multiply a by b and store in r: r = a * b
 *
 * Dispatches to a fixed-size comba/Karatsuba implementation when the operand
 * digit counts match one, otherwise falls back to the generic multipliers.
 *
 * @param  [in]   a  SP integer to multiply.
 * @param  [in]   b  SP integer to multiply.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, b or r is NULL; or the result will be too big for
 *          fixed data length.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_mul(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
#ifdef WOLFSSL_SP_INT_NEGATIVE
    /* Result sign: negative only when the operand signs differ. */
    int sign = MP_ZPOS;
#endif

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

    /* Need extra digit during calculation. */
    if ((err == MP_OKAY) && (a->used + b->used > r->size)) {
        err = MP_VAL;
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
        sp_print(b, "b");
    }
#endif

    if (err == MP_OKAY) {
#ifdef WOLFSSL_SP_INT_NEGATIVE
        sign = a->sign ^ b->sign;
#endif

        /* Anything multiplied by zero is zero. */
        if ((a->used == 0) || (b->used == 0)) {
            _sp_zero(r);
        }
        else
#ifndef WOLFSSL_SP_SMALL
#if !defined(WOLFSSL_HAVE_SP_ECC) && defined(HAVE_ECC)
#if SP_WORD_SIZE == 64
        if ((a->used == 4) && (b->used == 4)) {
            err = _sp_mul_4(a, b, r);
        }
        else
#endif /* SP_WORD_SIZE == 64 */
#if SP_WORD_SIZE == 64
#ifdef SQR_MUL_ASM
        if ((a->used == 6) && (b->used == 6)) {
            err = _sp_mul_6(a, b, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 64 */
#if SP_WORD_SIZE == 32
#ifdef SQR_MUL_ASM
        if ((a->used == 8) && (b->used == 8)) {
            err = _sp_mul_8(a, b, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 32 */
#if SP_WORD_SIZE == 32
#ifdef SQR_MUL_ASM
        if ((a->used == 12) && (b->used == 12)) {
            err = _sp_mul_12(a, b, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 32 */
#endif /* !WOLFSSL_HAVE_SP_ECC && HAVE_ECC */
#if defined(SQR_MUL_ASM) && (defined(WOLFSSL_SP_INT_LARGE_COMBA) || \
    (!defined(WOLFSSL_SP_MATH) && defined(WOLFCRYPT_HAVE_SAKKE) && \
    (SP_WORD_SIZE == 64)))
#if SP_INT_DIGITS >= 32
        if ((a->used == 16) && (b->used == 16)) {
            err = _sp_mul_16(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 32 */
#endif /* SQR_MUL_ASM && (WOLFSSL_SP_INT_LARGE_COMBA || !WOLFSSL_SP_MATH &&
        * WOLFCRYPT_HAVE_SAKKE && SP_WORD_SIZE == 64 */
#if defined(SQR_MUL_ASM) && defined(WOLFSSL_SP_INT_LARGE_COMBA)
#if SP_INT_DIGITS >= 48
        if ((a->used == 24) && (b->used == 24)) {
            err = _sp_mul_24(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 48 */
#if SP_INT_DIGITS >= 64
        if ((a->used == 32) && (b->used == 32)) {
            err = _sp_mul_32(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 64 */
#if SP_INT_DIGITS >= 96
        if ((a->used == 48) && (b->used == 48)) {
            err = _sp_mul_48(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 96 */
#if SP_INT_DIGITS >= 128
        if ((a->used == 64) && (b->used == 64)) {
            err = _sp_mul_64(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 128 */
#if SP_INT_DIGITS >= 192
        if ((a->used == 96) && (b->used == 96)) {
            err = _sp_mul_96(a, b, r);
        }
        else
#endif /* SP_INT_DIGITS >= 192 */
#endif /* SQR_MUL_ASM && WOLFSSL_SP_INT_LARGE_COMBA */
#endif /* !WOLFSSL_SP_SMALL */
#ifdef SQR_MUL_ASM
        /* Equal-size operands of any length use the n x n multiplier. */
        if (a->used == b->used) {
            err = _sp_mul_nxn(a, b, r);
        }
        else
#endif
        {
            /* Generic multiplication for mismatched sizes. */
            err = _sp_mul(a, b, r);
        }
    }

#ifdef WOLFSSL_SP_INT_NEGATIVE
    if (err == MP_OKAY) {
        /* Zero is always positive. */
        r->sign = (r->used == 0) ? MP_ZPOS : sign;
    }
#endif

#if 0
    if (err == MP_OKAY) {
        sp_print(r, "rmul");
    }
#endif

    return err;
}
  10792. /* END SP_MUL implementations. */
#if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
    defined(WOLFCRYPT_HAVE_ECCSI) || \
    (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN)) || defined(OPENSSL_ALL)
/* Multiply a by b mod m and store in r: r = (a * b) mod m
 *
 * When r and m do not alias, r is used to hold the full a * b product before
 * reduction; otherwise a temporary SP int is allocated for it.
 *
 * @param  [in]   a  SP integer to multiply.
 * @param  [in]   b  SP integer to multiply.
 * @param  [in]   m  SP integer that is the modulus.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, b, m or r is NULL; m is 0; or a * b is too big for
 *          fixed data length.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_mulmod(const sp_int* a, const sp_int* b, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (b == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Ensure result SP int is big enough for intermediates.
     * Only checked when r will hold the product; the r == m path uses a
     * temporary sized from the operands instead.
     */
    if ((err == MP_OKAY) && (r != m) && (a->used + b->used > r->size)) {
        err = MP_VAL;
    }

#if 0
    if (err == 0) {
        sp_print(a, "a");
        sp_print(b, "b");
        sp_print(m, "m");
    }
#endif

    /* Use r as intermediate result if not same as pointer m which is needed
     * after first intermediate result.
     */
    if ((err == MP_OKAY) && (r != m)) {
        /* Multiply and reduce. */
        err = sp_mul(a, b, r);
        if (err == MP_OKAY) {
            err = sp_mod(r, m, r);
        }
    }
    else if (err == MP_OKAY) {
        /* Create temporary for multiplication result. */
        DECL_SP_INT(t, a->used + b->used);
        ALLOC_SP_INT(t, a->used + b->used, err, NULL);
        if (err == MP_OKAY) {
            err = sp_init_size(t, a->used + b->used);
        }

        /* Multiply and reduce. */
        if (err == MP_OKAY) {
            err = sp_mul(a, b, t);
        }
        if (err == MP_OKAY) {
            err = sp_mod(t, m, r);
        }

        /* Dispose of an allocated SP int. */
        FREE_SP_INT(t, NULL);
    }

#if 0
    if (err == 0) {
        sp_print(r, "rmm");
    }
#endif

    return err;
}
#endif
  10861. #ifdef WOLFSSL_SP_INVMOD
/* Calculates the multiplicative inverse in the field. r*a = x*m + 1
 * Right-shift Algorithm (binary extended GCD). NOT constant time.
 *
 * Algorithm:
 *  1. u = m, v = a, b = 0, c = 1
 *  2. While v != 1 and u != 0
 *   2.1. If u even
 *    2.1.1. u /= 2
 *    2.1.2. b = (b / 2) mod m
 *   2.2. Else if v even
 *    2.2.1. v /= 2
 *    2.2.2. c = (c / 2) mod m
 *   2.3. Else if u >= v
 *    2.3.1. u -= v
 *    2.3.2. b = (c - b) mod m
 *   2.4. Else (v > u)
 *    2.4.1. v -= u
 *    2.4.2. c = (b - c) mod m
 *  3. NO_INVERSE if u == 0
 *
 * Caller guarantees: u, v, b and c are initialized and large enough, and not
 * both of a and m are even (so the invariant that one of u/v stays odd holds).
 *
 * @param  [in]   a  SP integer to find inverse of.
 * @param  [in]   m  SP integer this is the modulus.
 * @param  [in]   u  SP integer to use in calculation.
 * @param  [in]   v  SP integer to use in calculation.
 * @param  [in]   b  SP integer to use in calculation.
 * @param  [out]  c  SP integer that is the inverse.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when no inverse.
 */
static int _sp_invmod(const sp_int* a, const sp_int* m, sp_int* u, sp_int* v,
    sp_int* b, sp_int* c)
{
    int err = MP_OKAY;

    /* 1. u = m, v = a, b = 0, c = 1 */
    sp_copy(m, u);
    sp_copy(a, v);
    _sp_zero(b);
    sp_set(c, 1);

    /* 2. While v != 1 and u != 0 */
    while (!sp_isone(v) && !sp_iszero(u)) {
        /* 2.1. If u even */
        if ((u->dp[0] & 1) == 0) {
            /* 2.1.1. u /= 2 */
            sp_div_2(u, u);
            /* 2.1.2. b = (b / 2) mod m
             * Make b even first by adding m so the halving is exact. */
            if (sp_isodd(b)) {
                _sp_add_off(b, m, b, 0);
            }
            sp_div_2(b, b);
        }
        /* 2.2. Else if v even */
        else if ((v->dp[0] & 1) == 0) {
            /* 2.2.1. v /= 2 */
            sp_div_2(v, v);
            /* 2.2.2. c = (c / 2) mod m
             * Make c even first by adding m so the halving is exact. */
            if (sp_isodd(c)) {
                _sp_add_off(c, m, c, 0);
            }
            sp_div_2(c, c);
        }
        /* 2.3. Else if u >= v */
        else if (_sp_cmp_abs(u, v) != MP_LT) {
            /* 2.3.1. u -= v */
            _sp_sub_off(u, v, u, 0);
            /* 2.3.2. b = (c - b) mod m
             * Add m first when b < c to keep the subtraction non-negative. */
            if (_sp_cmp_abs(b, c) == MP_LT) {
                _sp_add_off(b, m, b, 0);
            }
            _sp_sub_off(b, c, b, 0);
        }
        /* 2.4. Else (v > u) */
        else {
            /* 2.4.1. v -= u */
            _sp_sub_off(v, u, v, 0);
            /* 2.4.2. c = (b - c) mod m
             * Add m first when c < b to keep the subtraction non-negative. */
            if (_sp_cmp_abs(c, b) == MP_LT) {
                _sp_add_off(c, m, c, 0);
            }
            _sp_sub_off(c, b, c, 0);
        }
    }
    /* 3. NO_INVERSE if u == 0 */
    if (sp_iszero(u)) {
        err = MP_VAL;
    }

    return err;
}
#if !defined(WOLFSSL_SP_SMALL) && (!defined(NO_RSA) || !defined(NO_DH))
/* Calculates the multiplicative inverse in the field. r*a = x*m + 1
 * Extended Euclidean Algorithm. NOT constant time.
 *
 * Creates two new SP ints.
 *
 * Algorithm:
 *  1. x = m, y = a, b = 1, c = 0
 *  2. while x > 1
 *   2.1. d = x / y, r = x mod y
 *   2.2. c -= d * b
 *   2.3. x = y, y = r
 *   2.4. s = b, b = c, c = s
 *  3. If y != 0 then NO_INVERSE
 *  4. If c < 0 then c += m
 *  5. inv = c
 *
 * When WOLFSSL_SP_INT_NEGATIVE is not defined, the sign of b and c is
 * tracked separately in bneg/cneg and all arithmetic is on magnitudes.
 *
 * @param  [in]   a    SP integer to find inverse of.
 * @param  [in]   m    SP integer this is the modulus.
 * @param  [in]   x    SP integer to use in calculation.
 * @param  [in]   y    SP integer to use in calculation.
 * @param  [in]   b    SP integer to use in calculation.
 * @param  [in]   c    SP integer to use in calculation.
 * @param  [out]  inv  SP integer that is the inverse.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when no inverse.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_invmod_div(const sp_int* a, const sp_int* m, sp_int* x,
    sp_int* y, sp_int* b, sp_int* c, sp_int* inv)
{
    int err = MP_OKAY;
    sp_int* d = NULL;
    sp_int* r = NULL;
    /* Scratch pointer used for the pointer swaps in steps 2.3/2.4. */
    sp_int* s;
#ifndef WOLFSSL_SP_INT_NEGATIVE
    /* Signs of b and c tracked out-of-band: 0 = positive, 1 = negative. */
    int bneg = 0;
    int cneg = 0;
    int neg;
#endif
    DECL_SP_INT_ARRAY(t, m->used + 1, 2);

    ALLOC_SP_INT_ARRAY(t, m->used + 1, 2, err, NULL);
    if (err == MP_OKAY) {
        d = t[0];
        r = t[1];

        /* 1. x = m, y = a, b = 1, c = 0 */
        sp_copy(a, y);
        sp_copy(m, x);
        sp_set(b, 1);
        _sp_zero(c);
    }
#ifdef WOLFSSL_SP_INT_NEGATIVE
    /* 2. while x > 1 */
    while ((err == MP_OKAY) && (!sp_isone(x)) && (!sp_iszero(x))) {
        /* 2.1. d = x / y, r = x mod y */
        err = sp_div(x, y, d, r);
        if (err == MP_OKAY) {
            /* 2.2. c -= d * b */
            if (sp_isone(d)) {
                /* c -= 1 * b */
                sp_sub(c, b, c);
            }
            else {
                /* d *= b */
                err = sp_mul(d, b, d);
                /* c -= d */
                if (err == MP_OKAY) {
                    sp_sub(c, d, c);
                }
            }
            /* 2.3. x = y, y = r */
            s = x; x = y; y = r; r = s;
            /* 2.4. s = b, b = c, c = s */
            s = b; b = c; c = s;
        }
    }
    /* 3. If y != 0 then NO_INVERSE */
    if ((err == MP_OKAY) && (!sp_iszero(y))) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* 4. If c < 0 then c += m */
        if (sp_isneg(c)) {
            sp_add(c, m, c);
        }
        /* 5. inv = c */
        sp_copy(c, inv);
    }
#else
    /* 2. while x > 1 */
    while ((err == MP_OKAY) && (!sp_isone(x)) && (!sp_iszero(x))) {
        /* 2.1. d = x / y, r = x mod y */
        err = sp_div(x, y, d, r);
        if (err == MP_OKAY) {
            if (sp_isone(d)) {
                /* c -= 1 * b */
                if ((bneg ^ cneg) == 1) {
                    /* c -= -b or -c -= b, therefore add. */
                    _sp_add_off(c, b, c, 0);
                }
                else if (_sp_cmp_abs(c, b) == MP_LT) {
                    /* |c| < |b| and same sign, reverse subtract and negate. */
                    _sp_sub_off(b, c, c, 0);
                    cneg = !cneg;
                }
                else {
                    /* |c| >= |b| */
                    _sp_sub_off(c, b, c, 0);
                }
            }
            else {
                /* d *= b */
                err = sp_mul(d, b, d);
                /* c -= d */
                if (err == MP_OKAY) {
                    if ((bneg ^ cneg) == 1) {
                        /* c -= -d or -c -= d, therefore add. */
                        _sp_add_off(c, d, c, 0);
                    }
                    else if (_sp_cmp_abs(c, d) == MP_LT) {
                        /* |c| < |d| and same sign, reverse subtract and negate.
                         */
                        _sp_sub_off(d, c, c, 0);
                        cneg = !cneg;
                    }
                    else {
                        /* |c| >= |d| */
                        _sp_sub_off(c, d, c, 0);
                    }
                }
            }
            /* 2.3. x = y, y = r */
            s = x; x = y; y = r; r = s;
            /* 2.4. s = b, b = c, c = s */
            s = b; b = c; c = s;
            neg = bneg; bneg = cneg; cneg = neg;
        }
    }
    /* 3. If y != 0 then NO_INVERSE */
    if ((err == MP_OKAY) && (!sp_iszero(y))) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* 4. If c < 0 then c += m
         * Magnitude form: -|c| mod m == m - |c|. */
        if (cneg) {
            sp_sub(m, c, c);
        }
        /* 5. inv = c */
        sp_copy(c, inv);
    }
#endif

    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
#endif
/* Calculates the multiplicative inverse in the field.
 * Right-shift Algorithm or Extended Euclidean Algorithm. NOT constant time.
 *
 * r*a = x*m + 1
 *
 * Handles even modulus via the identity:
 *   a^-1 mod m = m + ((1 - m*(m^-1 mod a)) / a)
 * which only needs an inverse with respect to the (odd) value a.
 *
 * @param  [in]   a  SP integer to find inverse of.
 * @param  [in]   m  SP integer this is the modulus.
 * @param  [out]  r  SP integer to hold result. r cannot be m.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, m or r is NULL; a or m is zero; a and m are even or
 *          m is negative.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_invmod(const sp_int* a, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
    sp_int* u = NULL;
    sp_int* v = NULL;
    sp_int* b = NULL;
    DECL_SP_INT_ARRAY(t, (m == NULL) ? 1 : (m->used + 1), 3);
    DECL_SP_INT(c, (m == NULL) ? 1 : (2 * m->used + 1));

    /* Validate parameters. */
    if ((a == NULL) || (m == NULL) || (r == NULL) || (r == m)) {
        err = MP_VAL;
    }
    /* Result must be able to hold intermediates up to twice modulus size. */
    if ((err == MP_OKAY) && (m->used * 2 > r->size)) {
        err = MP_VAL;
    }

#ifdef WOLFSSL_SP_INT_NEGATIVE
    if ((err == MP_OKAY) && (m->sign == MP_NEG)) {
        err = MP_VAL;
    }
#endif

    /* Allocate SP ints:
     *  - x3 one word larger than modulus
     *  - x1 one word longer than twice modulus used
     */
    ALLOC_SP_INT_ARRAY(t, m->used + 1, 3, err, NULL);
    ALLOC_SP_INT(c, 2 * m->used + 1, err, NULL);
    if (err == MP_OKAY) {
        u = t[0];
        v = t[1];
        b = t[2];
        /* c allocated separately and larger for even mod case. */

        /* Ensure number is less than modulus. */
        if (_sp_cmp_abs(a, m) != MP_LT) {
            err = sp_mod(a, m, r);
            a = r;
        }
    }

#ifdef WOLFSSL_SP_INT_NEGATIVE
    if ((err == MP_OKAY) && (a->sign == MP_NEG)) {
        /* Make 'a' positive */
        err = sp_add(m, a, r);
        a = r;
    }
#endif

    /* 0 != n*m + 1 (+ve m), r*a mod 0 is always 0 (never 1) */
    if ((err == MP_OKAY) && (sp_iszero(a) || sp_iszero(m))) {
        err = MP_VAL;
    }
    /* r*2*x != n*2*y + 1 for integer x,y */
    if ((err == MP_OKAY) && sp_iseven(a) && sp_iseven(m)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
        /* Initialize intermediate values with minimal sizes. */
        err = sp_init_size(u, m->used + 1);
        if (err == MP_OKAY)
            err = sp_init_size(v, m->used + 1);
        if (err == MP_OKAY)
            err = sp_init_size(b, m->used + 1);
        if (err == MP_OKAY)
            err = sp_init_size(c, 2 * m->used + 1);
    }

    /* 1*1 = 0*m + 1 */
    if ((err == MP_OKAY) && sp_isone(a)) {
        sp_set(r, 1);
    }
    else if (err == MP_OKAY) {
        const sp_int* mm = m;
        const sp_int* ma = a;
        int evenMod = 0;

        if (sp_iseven(m)) {
            /* a^-1 mod m = m + ((1 - m*(m^-1 % a)) / a)
             * Swap roles: invert (m mod a) with respect to a instead. */
            mm = a;
            ma = v;
            sp_copy(a, u);
            sp_mod(m, a, v);
            /* v == 0 when a divides m evenly - no inverse. */
            if (sp_iszero(v)) {
                err = MP_VAL;
            }
            evenMod = 1;
        }

        if (err == MP_OKAY) {
            /* Calculate inverse. */
#if !defined(WOLFSSL_SP_SMALL) && (!defined(NO_RSA) || !defined(NO_DH))
            /* Larger operands are faster with the division-based algorithm. */
            if (sp_count_bits(mm) >= 1024) {
                err = _sp_invmod_div(ma, mm, u, v, b, c, c);
            }
            else
#endif
            {
                err = _sp_invmod(ma, mm, u, v, b, c);
            }
        }

        /* Fixup for even modulus. */
        if ((err == MP_OKAY) && evenMod) {
            /* Finish operation.
             *    a^-1 mod m = m + ((1 - m*c) / a)
             * => a^-1 mod m = m - ((m*c - 1) / a)
             */
            err = sp_mul(c, m, c);
            if (err == MP_OKAY) {
                _sp_sub_d(c, 1, c);
                err = sp_div(c, a, c, NULL);
            }
            if (err == MP_OKAY) {
                sp_sub(m, c, r);
            }
        }
        else if (err == MP_OKAY) {
            err = sp_copy(c, r);
        }
    }

    FREE_SP_INT(c, NULL);
    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
  11235. #endif /* WOLFSSL_SP_INVMOD */
#ifdef WOLFSSL_SP_INVMOD_MONT_CT

/* Number of entries to pre-compute.
 * Many pre-defined primes have multiple of 8 consecutive 1s.
 * P-256 modulus - 2 => 32x1, 31x0, 1x1, 96x0, 94x1, 1x0, 1x1.
 */
#define CT_INV_MOD_PRE_CNT      8

/* Calculates the multiplicative inverse in the field - constant time.
 *
 * Modulus (m) must be a prime and greater than 2.
 * For prime m, inv = a ^ (m-2) mod m as 1 = a ^ (m-1) mod m.
 * Exponentiation is done with a sliding window over runs of set bits, using
 * Montgomery reduction after every multiply/square.
 *
 * Algorithm:
 *  pre = pre-computed values, m = modulus, a = value to find inverse of,
 *  e = exponent
 *  Pre-calc:
 *   1. pre[0] = 2^0 * a mod m
 *   2. For i in 2..CT_INV_MOD_PRE_CNT
 *    2.1. pre[i-1] = ((pre[i-2] ^ 2) * a) mod m
 *  Calc inverse:
 *   1. e = m - 2
 *   2. j = Count leading 1's up to CT_INV_MOD_PRE_CNT
 *   3. t = pre[j-1]
 *   4. s = 0
 *   5. j = 0
 *   6. For i index of next top bit..0
 *    6.1. bit = e[i]
 *    6.2. j += bit
 *    6.3. s += 1
 *    6.4. if j == CT_INV_MOD_PRE_CNT or (bit == 0 and j > 0)
 *     6.4.1. s -= 1 - bit
 *     6.4.2. For s downto 1
 *      6.4.2.1. t = (t ^ 2) mod m
 *     6.4.3. s = 1 - bit
 *     6.4.4. t = (t * pre[j-1]) mod m
 *     6.4.5. j = 0
 *   7. For s downto 1
 *    7.1. t = (t ^ 2) mod m
 *   8. If j > 0 then r = (t * pre[j-1]) mod m
 *   9. Else r = t
 *
 * @param  [in]   a   SP integer, Montgomery form, to find inverse of.
 * @param  [in]   m   SP integer this is the modulus.
 * @param  [out]  r   SP integer to hold result.
 * @param  [in]   mp  SP integer digit that is the bottom digit of inv(-m).
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, m or r is NULL; a is 0 or m is less than 3.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_invmod_mont_ct(const sp_int* a, const sp_int* m, sp_int* r,
    sp_int_digit mp)
{
    int err = MP_OKAY;
    int i;
    /* j counts consecutive 1 bits seen; s counts pending squarings. */
    int j = 0;
    int s = 0;
    sp_int* t = NULL;
    sp_int* e = NULL;
    /* Table of CT_INV_MOD_PRE_CNT pre-computed powers plus two temporaries. */
    DECL_SP_INT_ARRAY(pre, (m == NULL) ? 1 : m->used * 2 + 1,
        CT_INV_MOD_PRE_CNT + 2);

    /* Validate parameters. */
    if ((a == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* 0 != n*m + 1 (+ve m), r*a mod 0 is always 0 (never 1) */
    if ((err == MP_OKAY) && (sp_iszero(a) || sp_iszero(m) ||
            ((m->used == 1) && (m->dp[0] < 3)))) {
        err = MP_VAL;
    }

    ALLOC_SP_INT_ARRAY(pre, m->used * 2 + 1, CT_INV_MOD_PRE_CNT + 2, err, NULL);
    if (err == MP_OKAY) {
        t = pre[CT_INV_MOD_PRE_CNT + 0];
        e = pre[CT_INV_MOD_PRE_CNT + 1];
        /* Space for sqr and mul result. */
        _sp_init_size(t, m->used * 2 + 1);
        /* e = mod - 2 */
        _sp_init_size(e, m->used + 1);

        /* Create pre-computation results: ((2^(1..8))-1).a. */
        _sp_init_size(pre[0], m->used * 2 + 1);
        /* 1. pre[0] = 2^0 * a mod m
         *    Start with 1.a = a.
         */
        err = sp_copy(a, pre[0]);
        /* 2. For i in 2..CT_INV_MOD_PRE_CNT
         *    For rest of entries in table.
         */
        for (i = 1; (err == MP_OKAY) && (i < CT_INV_MOD_PRE_CNT); i++) {
            /* 2.1 pre[i] = ((pre[i-1] ^ 2) * a) mod m */
            /* Previous value ..1 -> ..10 */
            _sp_init_size(pre[i], m->used * 2 + 1);
            err = sp_sqr(pre[i-1], pre[i]);
            if (err == MP_OKAY) {
                err = _sp_mont_red(pre[i], m, mp);
            }
            /* ..10 -> ..11 */
            if (err == MP_OKAY) {
                err = sp_mul(pre[i], a, pre[i]);
            }
            if (err == MP_OKAY) {
                err = _sp_mont_red(pre[i], m, mp);
            }
        }
    }

    if (err == MP_OKAY) {
        /* 1. e = m - 2 */
        _sp_sub_d(m, 2, e);
        /* 2. j = Count leading 1's up to CT_INV_MOD_PRE_CNT
         *    One or more of the top bits is 1 so count.
         */
        for (i = sp_count_bits(e)-2, j = 1; i >= 0; i--, j++) {
            if ((!sp_is_bit_set(e, i)) || (j == CT_INV_MOD_PRE_CNT)) {
                break;
            }
        }
        /* 3. Set tmp to product of leading bits. */
        err = sp_copy(pre[j-1], t);

        /* 4. s = 0 */
        s = 0;
        /* 5. j = 0 */
        j = 0;
        /* 6. For i index of next top bit..0
         *    Do remaining bits in exponent.
         */
        for (; (err == MP_OKAY) && (i >= 0); i--) {
            /* 6.1. bit = e[i] */
            int bit = sp_is_bit_set(e, i);

            /* 6.2. j += bit
             *      Update count of consequitive 1 bits.
             */
            j += bit;
            /* 6.3. s += 1
             *      Update count of squares required.
             */
            s++;

            /* 6.4. if j == CT_INV_MOD_PRE_CNT or (bit == 0 and j > 0)
             *      Check if max 1 bits or 0 and have seen at least one 1 bit.
             */
            if ((j == CT_INV_MOD_PRE_CNT) || ((!bit) && (j > 0))) {
                /* 6.4.1. s -= 1 - bit */
                bit = 1 - bit;
                s -= bit;
                /* 6.4.2. For s downto 1
                 *        Do s squares.
                 */
                for (; (err == MP_OKAY) && (s > 0); s--) {
                    /* 6.4.2.1. t = (t ^ 2) mod m */
                    err = sp_sqr(t, t);
                    if (err == MP_OKAY) {
                        err = _sp_mont_red(t, m, mp);
                    }
                }
                /* 6.4.3. s = 1 - bit */
                s = bit;

                /* 6.4.4. t = (t * pre[j-1]) mod m */
                err = sp_mul(t, pre[j-1], t);
                if (err == MP_OKAY) {
                    err = _sp_mont_red(t, m, mp);
                }

                /* 6.4.5. j = 0
                 *        Reset number of 1 bits seen.
                 */
                j = 0;
            }
        }
    }
    if (err == MP_OKAY) {
        /* 7. For s downto 1
         *    Do s squares - total remaining. */
        for (; (err == MP_OKAY) && (s > 0); s--) {
            /* 7.1. t = (t ^ 2) mod m */
            err = sp_sqr(t, t);
            if (err == MP_OKAY) {
                err = _sp_mont_red(t, m, mp);
            }
        }
        /* 8. If j > 0 then r = (t * pre[j-1]) mod m */
        if (j > 0) {
            err = sp_mul(t, pre[j-1], r);
            if (err == MP_OKAY) {
                err = _sp_mont_red(r, m, mp);
            }
        }
        /* 9. Else r = t */
        else {
            err = sp_copy(t, r);
        }
    }

    FREE_SP_INT_ARRAY(pre, NULL);
    return err;
}

#endif /* WOLFSSL_SP_INVMOD_MONT_CT */
  11427. /**************************
  11428. * Exponentiation functions
  11429. **************************/
#if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY) && \
    !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || !defined(NO_DH) || \
    defined(OPENSSL_ALL)
/* Internal. Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
 * Process the exponent one bit at a time.
 * Is constant time and can be cache attack resistant.
 *
 * The fake working value t[0] is operated on for leading zero bits of the
 * exponent (s == 0) so the operation sequence does not depend on where the
 * first set bit is.
 *
 * Algorithm:
 *  b: base, e: exponent, m: modulus, r: result, bits: #bits to use
 *  1. s = 0
 *  2. t[0] = b mod m.
 *  3. t[1] = t[0]
 *  4. For i in (bits-1)...0
 *   4.1. t[s] = t[s] ^ 2
 *   4.2. y = e[i]
 *   4.3  j = y & s
 *   4.4  s = s | y
 *   4.5. t[j] = t[j] * b
 *  5. r = t[1]
 *
 * @param  [in]   b     SP integer that is the base.
 * @param  [in]   e     SP integer that is the exponent.
 * @param  [in]   bits  Number of bits in exponent to use. May be greater than
 *                      count of bits in e.
 * @param  [in]   m     SP integer that is the modulus.
 * @param  [out]  r     SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_exptmod_ex(const sp_int* b, const sp_int* e, int bits,
    const sp_int* m, sp_int* r)
{
    int i;
    int err = MP_OKAY;
    int done = 0;
    /* 1. s = 0 - becomes 1 once the first set exponent bit is seen. */
    int s = 0;
#ifdef WC_NO_CACHE_RESISTANT
    DECL_SP_INT_ARRAY(t, 2 * m->used + 1, 2);
#else
    DECL_SP_INT_ARRAY(t, 2 * m->used + 1, 3);
#endif

    /* Allocate temporaries. */
#ifdef WC_NO_CACHE_RESISTANT
    ALLOC_SP_INT_ARRAY(t, 2 * m->used + 1, 2, err, NULL);
#else
    /* Working SP int needed when cache resistant. */
    ALLOC_SP_INT_ARRAY(t, 2 * m->used + 1, 3, err, NULL);
#endif
    if (err == MP_OKAY) {
        /* Initialize temporaries. */
        _sp_init_size(t[0], 2 * m->used + 1);
        _sp_init_size(t[1], 2 * m->used + 1);
    #ifndef WC_NO_CACHE_RESISTANT
        _sp_init_size(t[2], 2 * m->used + 1);
    #endif

        /* 2. t[0] = b mod m
         *    Ensure base is less than modulus - set fake working value to base.
         */
        if (_sp_cmp_abs(b, m) != MP_LT) {
            err = sp_mod(b, m, t[0]);
            /* Handle base == modulus. */
            if ((err == MP_OKAY) && sp_iszero(t[0])) {
                sp_set(r, 0);
                done = 1;
            }
        }
        else {
            /* Copy base into working variable. */
            err = sp_copy(b, t[0]);
        }
    }

    if ((!done) && (err == MP_OKAY)) {
        /* 3. t[1] = t[0]
         *    Set real working value to base.
         */
        err = sp_copy(t[0], t[1]);

        /* 4. For i in (bits-1)...0 */
        for (i = bits - 1; (err == MP_OKAY) && (i >= 0); i--) {
#ifdef WC_NO_CACHE_RESISTANT
            /* 4.1. t[s] = t[s] ^ 2 */
            err = sp_sqrmod(t[s], m, t[s]);
            if (err == MP_OKAY) {
                /* 4.2. y = e[i] */
                int y = (e->dp[i >> SP_WORD_SHIFT] >> (i & SP_WORD_MASK)) & 1;
                /* 4.3. j = y & s - only multiply into the real value (j == 1)
                 *      once a set bit has been seen. */
                int j = y & s;
                /* 4.4  s = s | y */
                s |= y;
                /* 4.5. t[j] = t[j] * b */
                err = sp_mulmod(t[j], b, m, t[j]);
            }
#else
            /* 4.1. t[s] = t[s] ^ 2
             * Cache-resistant: select t[0] or t[1] by masking both addresses
             * with sp_off_on_addr so memory access does not depend on s,
             * work in t[2], then write back through the same masked select.
             */
            sp_copy((sp_int*)(((size_t)t[0] & sp_off_on_addr[s^1]) +
                              ((size_t)t[1] & sp_off_on_addr[s  ])),
                    t[2]);
            err = sp_sqrmod(t[2], m, t[2]);
            sp_copy(t[2],
                    (sp_int*)(((size_t)t[0] & sp_off_on_addr[s^1]) +
                              ((size_t)t[1] & sp_off_on_addr[s  ])));

            if (err == MP_OKAY) {
                /* 4.2. y = e[i] */
                int y = (e->dp[i >> SP_WORD_SHIFT] >> (i & SP_WORD_MASK)) & 1;
                /* 4.3. j = y & s */
                int j = y & s;
                /* 4.4  s = s | y */
                s |= y;
                /* 4.5. t[j] = t[j] * b - same masked select as above. */
                sp_copy((sp_int*)(((size_t)t[0] & sp_off_on_addr[j^1]) +
                                  ((size_t)t[1] & sp_off_on_addr[j  ])),
                        t[2]);
                err = sp_mulmod(t[2], b, m, t[2]);
                sp_copy(t[2],
                        (sp_int*)(((size_t)t[0] & sp_off_on_addr[j^1]) +
                                  ((size_t)t[1] & sp_off_on_addr[j  ])));
            }
#endif
        }
    }

    if ((!done) && (err == MP_OKAY)) {
        /* 5. r = t[1] */
        err = sp_copy(t[1], r);
    }

    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
#endif
  11559. #if (defined(WOLFSSL_SP_MATH_ALL) && ((!defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  11560. !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || !defined(NO_DH))) || \
  11561. defined(OPENSSL_ALL)
  11562. #ifndef WC_NO_HARDEN
  11563. #if !defined(WC_NO_CACHE_RESISTANT)
  11564. /* Internal. Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  11565. * Process the exponent one bit at a time with base in Montgomery form.
  11566. * Is constant time and cache attack resistant.
  11567. *
  11568. * Algorithm:
  11569. * b: base, e: exponent, m: modulus, r: result, bits: #bits to use
  11570. * 1. t[0] = b mod m.
  11571. * 2. s = 0
  11572. * 3. t[0] = ToMont(t[0])
  11573. * 4. t[1] = t[0]
  11574. * 5. bm = t[0]
  11575. * 6. For i in (bits-1)...0
  11576. * 6.1. t[s] = t[s] ^ 2
  11577. * 6.2. y = e[i]
  11578. * 6.3 j = y & s
  11579. * 6.4 s = s | y
  11580. * 6.5. t[j] = t[j] * bm
  11581. * 7. t[1] = FromMont(t[1])
  11582. * 8. r = t[1]
  11583. *
  11584. * @param [in] b SP integer that is the base.
  11585. * @param [in] e SP integer that is the exponent.
  11586. * @param [in] bits Number of bits in exponent to use. May be greater than
  11587. * count of bits in e.
  11588. * @param [in] m SP integer that is the modulus.
  11589. * @param [out] r SP integer to hold result.
  11590. *
  11591. * @return MP_OKAY on success.
  11592. * @return MP_MEM when dynamic memory allocation fails.
  11593. */
static int _sp_exptmod_mont_ex(const sp_int* b, const sp_int* e, int bits,
    const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;
    int done = 0;
    /* t[0]: fake working value, t[1]: real working value, t[2]: base kept in
     * Montgomery form, t[3]: scratch for the branch-free copy in/out. */
    DECL_SP_INT_ARRAY(t, m->used * 2 + 1, 4);

    /* Allocate temporaries. */
    ALLOC_SP_INT_ARRAY(t, m->used * 2 + 1, 4, err, NULL);
    if (err == MP_OKAY) {
        /* Initialize temporaries - double width to hold mul/sqr results. */
        _sp_init_size(t[0], m->used * 2 + 1);
        _sp_init_size(t[1], m->used * 2 + 1);
        _sp_init_size(t[2], m->used * 2 + 1);
        _sp_init_size(t[3], m->used * 2 + 1);

        /* 1. Ensure base is less than modulus. */
        if (_sp_cmp_abs(b, m) != MP_LT) {
            err = sp_mod(b, m, t[0]);
            /* Handle base == modulus: result is zero. */
            if ((err == MP_OKAY) && sp_iszero(t[0])) {
                sp_set(r, 0);
                done = 1;
            }
        }
        else {
            /* Copy base into working variable. */
            err = sp_copy(b, t[0]);
        }
    }
    if ((!done) && (err == MP_OKAY)) {
        int i;
        /* 2. s = 0 - becomes 1 once a set exponent bit has been processed. */
        int s = 0;
        sp_int_digit mp;

        /* Calculate Montgomery multiplier for reduction. */
        _sp_mont_setup(m, &mp);
        /* 3. t[0] = ToMont(t[0])
         * Convert base to Montgomery form - as fake working value.
         * Montgomery normalizer is computed into t[1] first and multiplied in.
         */
        err = sp_mont_norm(t[1], m);
        if (err == MP_OKAY) {
            err = sp_mulmod(t[0], t[1], m, t[0]);
        }
        if (err == MP_OKAY) {
            /* 4. t[1] = t[0]
             * Set real working value to base.
             */
            sp_copy(t[0], t[1]);
            /* 5. bm = t[0] - retain Montgomery form of base for step 6.5. */
            sp_copy(t[0], t[2]);
        }
        /* 6. For i in (bits-1)...0 */
        for (i = bits - 1; (err == MP_OKAY) && (i >= 0); i--) {
            /* 6.1. t[s] = t[s] ^ 2
             * The sp_off_on_addr masks select the address of t[0] or t[1]
             * without a data-dependent branch, so the memory access pattern
             * does not reveal s (cache attack resistance).
             */
            sp_copy((sp_int*)(((size_t)t[0] & sp_off_on_addr[s^1]) +
                              ((size_t)t[1] & sp_off_on_addr[s  ])),
                    t[3]);
            err = sp_sqr(t[3], t[3]);
            if (err == MP_OKAY) {
                err = _sp_mont_red(t[3], m, mp);
            }
            sp_copy(t[3],
                    (sp_int*)(((size_t)t[0] & sp_off_on_addr[s^1]) +
                              ((size_t)t[1] & sp_off_on_addr[s  ])));
            if (err == MP_OKAY) {
                /* 6.2. y = e[i] - current exponent bit. */
                int y = (e->dp[i >> SP_WORD_SHIFT] >> (i & SP_WORD_MASK)) & 1;
                /* 6.3 j = y & s - only multiply into the real value (t[1])
                 * once a set bit has been seen; before that t[0] absorbs the
                 * dummy multiply. */
                int j = y & s;
                /* 6.4 s = s | y */
                s |= y;
                /* 6.5. t[j] = t[j] * bm - same branch-free select as 6.1. */
                sp_copy((sp_int*)(((size_t)t[0] & sp_off_on_addr[j^1]) +
                                  ((size_t)t[1] & sp_off_on_addr[j  ])),
                        t[3]);
                err = sp_mul(t[3], t[2], t[3]);
                if (err == MP_OKAY) {
                    err = _sp_mont_red(t[3], m, mp);
                }
                sp_copy(t[3],
                        (sp_int*)(((size_t)t[0] & sp_off_on_addr[j^1]) +
                                  ((size_t)t[1] & sp_off_on_addr[j  ])));
            }
        }
        if (err == MP_OKAY) {
            /* 7. t[1] = FromMont(t[1]) */
            err = _sp_mont_red(t[1], m, mp);
            /* Reduction implementation returns number to range: 0..m-1. */
        }
    }
    if ((!done) && (err == MP_OKAY)) {
        /* 8. r = t[1] */
        err = sp_copy(t[1], r);
    }

    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
  11690. #else
  11691. #ifdef SP_ALLOC
  11692. #define SP_ALLOC_PREDEFINED
  11693. #endif
  11694. /* Always allocate large array of sp_ints unless defined WOLFSSL_SP_NO_MALLOC */
  11695. #define SP_ALLOC
  11696. /* Internal. Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  11697. * Creates a window of precalculated exponents with base in Montgomery form.
  11698. * Is constant time but NOT cache attack resistant.
  11699. *
  11700. * Algorithm:
  11701. * b: base, e: exponent, m: modulus, r: result, bits: #bits to use
  11702. * w: window size based on bits.
  11703. * 1. t[1] = b mod m.
  11704. * 2. t[0] = MontNorm(m) = ToMont(1)
  11705. * 3. t[1] = ToMont(t[1])
  11706. * 4. For i in 2..(2 ^ w) - 1
  11707. * 4.1 if i[0] == 0 then t[i] = t[i/2] ^ 2
  11708. * 4.2 if i[0] == 1 then t[i] = t[i-1] * t[1]
  11709. * 5. cb = w * (bits / w)
  11710. * 5. tr = t[e / (2 ^ cb)]
  11711. * 6. For i in cb..w
  11712. * 6.1. y = e[(i-1)..(i-w)]
  11713. * 6.2. tr = tr ^ (2 * w)
  11714. * 6.3. tr = tr * t[y]
  11715. * 7. tr = FromMont(tr)
  11716. * 8. r = tr
  11717. *
  11718. * @param [in] b SP integer that is the base.
  11719. * @param [in] e SP integer that is the exponent.
  11720. * @param [in] bits Number of bits in exponent to use. May be greater than
  11721. * count of bits in e.
  11722. * @param [in] m SP integer that is the modulus.
  11723. * @param [out] r SP integer to hold result.
  11724. *
  11725. * @return MP_OKAY on success.
  11726. * @return MP_MEM when dynamic memory allocation fails.
  11727. */
static int _sp_exptmod_mont_ex(const sp_int* b, const sp_int* e, int bits,
    const sp_int* m, sp_int* r)
{
    int i;
    int c;
    int y;
    int winBits;
    int preCnt;
    int err = MP_OKAY;
    int done = 0;
    sp_int_digit mask;
    sp_int* tr = NULL;
    /* Maximum winBits is 6, so table has at most (1 << 6) entries plus one
     * extra sp_int for the temporary result. */
    DECL_SP_INT_ARRAY(t, m->used * 2 + 1, (1 << 6) + 1);

    /* Window bits based on number of pre-calculations versus number of loop
     * calculations.
     * Exponents for RSA and DH will result in 6-bit windows.
     */
    if (bits > 450) {
        winBits = 6;
    }
    else if (bits <= 21) {
        winBits = 1;
    }
    else if (bits <= 36) {
        winBits = 3;
    }
    else if (bits <= 140) {
        winBits = 4;
    }
    else {
        winBits = 5;
    }
    /* An entry for each possible 0..2^winBits-1 value. */
    preCnt = 1 << winBits;
    /* Mask for calculating index into pre-computed table. */
    mask = preCnt - 1;

    /* Allocate sp_ints for:
     *  - pre-computation table
     *  - temporary result
     */
    ALLOC_SP_INT_ARRAY(t, m->used * 2 + 1, preCnt + 1, err, NULL);
    if (err == MP_OKAY) {
        /* Set variable to use allocated memory. */
        tr = t[preCnt];

        /* Initialize all allocated. */
        for (i = 0; i < preCnt; i++) {
            _sp_init_size(t[i], m->used * 2 + 1);
        }
        _sp_init_size(tr, m->used * 2 + 1);

        /* 1. t[1] = b mod m. */
        if (_sp_cmp_abs(b, m) != MP_LT) {
            err = sp_mod(b, m, t[1]);
            /* Handle base == modulus: result is zero. */
            if ((err == MP_OKAY) && sp_iszero(t[1])) {
                sp_set(r, 0);
                done = 1;
            }
        }
        else {
            /* Copy base into entry of table to contain b^1. */
            err = sp_copy(b, t[1]);
        }
    }
    if ((!done) && (err == MP_OKAY)) {
        sp_int_digit mp;
        sp_int_digit n;

        /* Calculate Montgomery multiplier for reduction. */
        _sp_mont_setup(m, &mp);
        /* 2. t[0] = MontNorm(m) = ToMont(1) */
        err = sp_mont_norm(t[0], m);
        if (err == MP_OKAY) {
            /* 3. t[1] = ToMont(t[1]) */
            err = sp_mulmod(t[1], t[0], m, t[1]);
        }

        /* 4. For i in 2..(2 ^ w) - 1 - fill rest of table from earlier
         * entries: t[i] = b^i in Montgomery form. */
        for (i = 2; (i < preCnt) && (err == MP_OKAY); i++) {
            /* 4.1 if i[0] == 0 then t[i] = t[i/2] ^ 2 */
            if ((i & 1) == 0) {
                err = sp_sqr(t[i/2], t[i]);
            }
            /* 4.2 if i[0] == 1 then t[i] = t[i-1] * t[1] */
            else {
                err = sp_mul(t[i-1], t[1], t[i]);
            }
            /* Montgomery reduce square or multiplication result. */
            if (err == MP_OKAY) {
                err = _sp_mont_red(t[i], m, mp);
            }
        }

        if (err == MP_OKAY) {
            /* 5. cb = w * (bits / w) - locate the top (possibly partial)
             * window of the exponent. */
            i = (bits - 1) >> SP_WORD_SHIFT;
            n = e->dp[i--];
            /* Find top bit index in last word. */
            c = bits & (SP_WORD_SIZE - 1);
            if (c == 0) {
                c = SP_WORD_SIZE;
            }
            /* Use as many bits from top to make remaining a multiple of window
             * size.
             */
            if ((bits % winBits) != 0) {
                c -= bits % winBits;
            }
            else {
                c -= winBits;
            }

            /* 5. tr = t[e / (2 ^ cb)] */
            y = (int)(n >> c);
            n <<= SP_WORD_SIZE - c;
            /* 5. Copy table value for first window. */
            sp_copy(t[y], tr);

            /* 6. For i in cb..w - process remaining windows, w bits at a
             * time. n holds unconsumed bits of current digit left-aligned;
             * c counts them. */
            for (; (i >= 0) || (c >= winBits); ) {
                int j;

                /* 6.1. y = e[(i-1)..(i-w)] */
                if (c == 0) {
                    /* Bits up to end of digit */
                    n = e->dp[i--];
                    y = (int)(n >> (SP_WORD_SIZE - winBits));
                    n <<= winBits;
                    c = SP_WORD_SIZE - winBits;
                }
                else if (c < winBits) {
                    /* Bits to end of digit and part of next */
                    y = (int)(n >> (SP_WORD_SIZE - winBits));
                    n = e->dp[i--];
                    c = winBits - c;
                    y |= (int)(n >> (SP_WORD_SIZE - c));
                    n <<= c;
                    c = SP_WORD_SIZE - c;
                }
                else {
                    /* Bits from middle of digit */
                    y = (int)((n >> (SP_WORD_SIZE - winBits)) & mask);
                    n <<= winBits;
                    c -= winBits;
                }

                /* 6.2. tr = tr ^ (2 * w) */
                for (j = 0; (j < winBits) && (err == MP_OKAY); j++) {
                    err = sp_sqr(tr, tr);
                    if (err == MP_OKAY) {
                        err = _sp_mont_red(tr, m, mp);
                    }
                }

                /* 6.3. tr = tr * t[y] */
                if (err == MP_OKAY) {
                    err = sp_mul(tr, t[y], tr);
                }
                if (err == MP_OKAY) {
                    err = _sp_mont_red(tr, m, mp);
                }
            }
        }

        if (err == MP_OKAY) {
            /* 7. tr = FromMont(tr) */
            err = _sp_mont_red(tr, m, mp);
            /* Reduction implementation returns number to range: 0..m-1. */
        }
    }
    if ((!done) && (err == MP_OKAY)) {
        /* 8. r = tr */
        err = sp_copy(tr, r);
    }

    FREE_SP_INT_ARRAY(t, NULL);
    return err;
}
  11895. #ifndef SP_ALLOC_PREDEFINED
  11896. #undef SP_ALLOC
  11897. #undef SP_ALLOC_PREDEFINED
  11898. #endif
  11899. #endif /* !WC_NO_CACHE_RESISTANT */
  11900. #endif /* !WC_NO_HARDEN */
  11901. /* w = Log2(SP_WORD_SIZE) - 1 */
  11902. #if SP_WORD_SIZE == 8
  11903. #define EXP2_WINSIZE 2
  11904. #elif SP_WORD_SIZE == 16
  11905. #define EXP2_WINSIZE 3
  11906. #elif SP_WORD_SIZE == 32
  11907. #define EXP2_WINSIZE 4
  11908. #elif SP_WORD_SIZE == 64
  11909. #define EXP2_WINSIZE 5
  11910. #else
  11911. #error "sp_exptmod_base_2: Unexpected SP_WORD_SIZE"
  11912. #endif
  11913. /* Mask is all bits in window set. */
  11914. #define EXP2_MASK ((1 << EXP2_WINSIZE) - 1)
  11915. /* Internal. Exponentiates 2 to the power of e modulo m into r: r = 2 ^ e mod m
  11916. * Is constant time and cache attack resistant.
  11917. *
 * Calculates value to make mod operations constant time except when
 * WC_NO_HARDEN is defined or the modulus fits in one word.
  11920. *
  11921. * Algorithm:
  11922. * b: base, e: exponent, m: modulus, r: result, bits: #bits to use
  11923. * w: window size based on #bits in word.
  11924. * 1. if Words(m) > 1 then tr = MontNorm(m) = ToMont(1)
  11925. * else tr = 1
  11926. * 2. if Words(m) > 1 and HARDEN then a = m * (2 ^ (2^w))
  11927. * else a = 0
  11928. * 3. cb = w * (bits / w)
  11929. * 4. y = e / (2 ^ cb)
  11930. * 5. tr = (tr * (2 ^ y) + a) mod m
  11931. * 6. For i in cb..w
  11932. * 6.1. y = e[(i-1)..(i-w)]
  11933. * 6.2. tr = tr ^ (2 * w)
  11934. * 6.3. tr = ((tr * (2 ^ y) + a) mod m
  11935. * 7. if Words(m) > 1 then tr = FromMont(tr)
  11936. * 8. r = tr
  11937. *
  11938. * @param [in] e SP integer that is the exponent.
  11939. * @param [in] digits Number of digits in base to use. May be greater than
  11940. * count of bits in b.
  11941. * @param [in] m SP integer that is the modulus.
  11942. * @param [out] r SP integer to hold result.
  11943. *
  11944. * @return MP_OKAY on success.
  11945. * @return MP_MEM when dynamic memory allocation fails.
  11946. */
  11947. static int _sp_exptmod_base_2(const sp_int* e, int digits, const sp_int* m,
  11948. sp_int* r)
  11949. {
  11950. int i = 0;
  11951. int c = 0;
  11952. int y;
  11953. int err = MP_OKAY;
  11954. sp_int_digit mp = 0;
  11955. sp_int_digit n = 0;
  11956. #ifndef WC_NO_HARDEN
  11957. sp_int* a = NULL;
  11958. sp_int* tr = NULL;
  11959. DECL_SP_INT_ARRAY(d, m->used * 2 + 1, 2);
  11960. #else
  11961. DECL_SP_INT(tr, m->used * 2 + 1);
  11962. #endif
  11963. int useMont = m->used > 1;
  11964. #if 0
  11965. sp_print_int(2, "a");
  11966. sp_print(e, "b");
  11967. sp_print(m, "m");
  11968. #endif
  11969. #ifndef WC_NO_HARDEN
  11970. /* Allocate sp_ints for:
  11971. * - constant time add value for mod operation
  11972. * - temporary result
  11973. */
  11974. ALLOC_SP_INT_ARRAY(d, m->used * 2 + 1, 2, err, NULL);
  11975. #else
  11976. /* Allocate sp_int for temporary result. */
  11977. ALLOC_SP_INT(tr, m->used * 2 + 1, err, NULL);
  11978. #endif
  11979. if (err == MP_OKAY) {
  11980. #ifndef WC_NO_HARDEN
  11981. a = d[0];
  11982. tr = d[1];
  11983. _sp_init_size(a, m->used * 2 + 1);
  11984. #endif
  11985. _sp_init_size(tr, m->used * 2 + 1);
  11986. }
  11987. if ((err == MP_OKAY) && useMont) {
  11988. /* Calculate Montgomery multiplier for reduction. */
  11989. _sp_mont_setup(m, &mp);
  11990. }
  11991. if (err == MP_OKAY) {
  11992. /* 1. if Words(m) > 1 then tr = MontNorm(m) = ToMont(1)
  11993. * else tr = 1
  11994. */
  11995. if (useMont) {
  11996. /* Calculate Montgomery normalizer for modulus - 1 in Montgomery
  11997. * form.
  11998. */
  11999. err = sp_mont_norm(tr, m);
  12000. }
  12001. else {
  12002. /* For single word modulus don't use Montgomery form. */
  12003. err = sp_set(tr, 1);
  12004. }
  12005. }
  12006. /* 2. if Words(m) > 1 and HARDEN then a = m * (2 ^ (2^w))
  12007. * else a = 0
  12008. */
  12009. #ifndef WC_NO_HARDEN
  12010. if ((err == MP_OKAY) && useMont) {
  12011. err = sp_mul_2d(m, 1 << EXP2_WINSIZE, a);
  12012. }
  12013. #endif
  12014. if (err == MP_OKAY) {
  12015. /* 3. cb = w * (bits / w) */
  12016. i = digits - 1;
  12017. n = e->dp[i--];
  12018. c = SP_WORD_SIZE;
  12019. #if EXP2_WINSIZE != 1
  12020. c -= (digits * SP_WORD_SIZE) % EXP2_WINSIZE;
  12021. if (c != SP_WORD_SIZE) {
  12022. /* 4. y = e / (2 ^ cb) */
  12023. y = (int)(n >> c);
  12024. n <<= SP_WORD_SIZE - c;
  12025. }
  12026. else
  12027. #endif
  12028. {
  12029. /* 4. y = e / (2 ^ cb) */
  12030. y = (int)((n >> (SP_WORD_SIZE - EXP2_WINSIZE)) & EXP2_MASK);
  12031. n <<= EXP2_WINSIZE;
  12032. c -= EXP2_WINSIZE;
  12033. }
  12034. /* 5. tr = (tr * (2 ^ y) + a) mod m */
  12035. err = sp_mul_2d(tr, y, tr);
  12036. }
  12037. #ifndef WC_NO_HARDEN
  12038. if ((err == MP_OKAY) && useMont) {
  12039. /* Add value to make mod operation constant time. */
  12040. err = sp_add(tr, a, tr);
  12041. }
  12042. #endif
  12043. if (err == MP_OKAY) {
  12044. err = sp_mod(tr, m, tr);
  12045. }
  12046. /* 6. For i in cb..w */
  12047. for (; (err == MP_OKAY) && ((i >= 0) || (c >= EXP2_WINSIZE)); ) {
  12048. int j;
  12049. /* 6.1. y = e[(i-1)..(i-w)] */
  12050. if (c == 0) {
  12051. /* Bits from next digit. */
  12052. n = e->dp[i--];
  12053. y = (int)(n >> (SP_WORD_SIZE - EXP2_WINSIZE));
  12054. n <<= EXP2_WINSIZE;
  12055. c = SP_WORD_SIZE - EXP2_WINSIZE;
  12056. }
  12057. #if (EXP2_WINSIZE != 1) && (EXP2_WINSIZE != 2) && (EXP2_WINSIZE != 4)
  12058. else if (c < EXP2_WINSIZE) {
  12059. /* Bits to end of digit and part of next */
  12060. y = (int)(n >> (SP_WORD_SIZE - EXP2_WINSIZE));
  12061. n = e->dp[i--];
  12062. c = EXP2_WINSIZE - c;
  12063. y |= (int)(n >> (SP_WORD_SIZE - c));
  12064. n <<= c;
  12065. c = SP_WORD_SIZE - c;
  12066. }
  12067. #endif
  12068. else {
  12069. /* Bits from middle of digit */
  12070. y = (int)((n >> (SP_WORD_SIZE - EXP2_WINSIZE)) & EXP2_MASK);
  12071. n <<= EXP2_WINSIZE;
  12072. c -= EXP2_WINSIZE;
  12073. }
  12074. /* 6.2. tr = tr ^ (2 * w) */
  12075. for (j = 0; (j < EXP2_WINSIZE) && (err == MP_OKAY); j++) {
  12076. err = sp_sqr(tr, tr);
  12077. if (err == MP_OKAY) {
  12078. if (useMont) {
  12079. err = _sp_mont_red(tr, m, mp);
  12080. }
  12081. else {
  12082. err = sp_mod(tr, m, tr);
  12083. }
  12084. }
  12085. }
  12086. /* 6.3. tr = ((tr * (2 ^ y) + a) mod m */
  12087. if (err == MP_OKAY) {
  12088. err = sp_mul_2d(tr, y, tr);
  12089. }
  12090. #ifndef WC_NO_HARDEN
  12091. if ((err == MP_OKAY) && useMont) {
  12092. /* Add value to make mod operation constant time. */
  12093. err = sp_add(tr, a, tr);
  12094. }
  12095. #endif
  12096. if (err == MP_OKAY) {
  12097. /* Reduce current result by modulus. */
  12098. err = sp_mod(tr, m, tr);
  12099. }
  12100. }
  12101. /* 7. if Words(m) > 1 then tr = FromMont(tr) */
  12102. if ((err == MP_OKAY) && useMont) {
  12103. err = _sp_mont_red(tr, m, mp);
  12104. /* Reduction implementation returns number to range: 0..m-1. */
  12105. }
  12106. if (err == MP_OKAY) {
  12107. /* 8. r = tr */
  12108. err = sp_copy(tr, r);
  12109. }
  12110. #if 0
  12111. sp_print(r, "rme");
  12112. #endif
  12113. FREE_SP_INT_ARRAY(d, NULL);
  12114. return err;
  12115. }
  12116. #endif
  12117. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  12118. !defined(NO_DH) || (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN)) || \
  12119. defined(OPENSSL_ALL)
  12120. /* Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  12121. *
  12122. * Error returned when parameters r == e or r == m and base >= modulus.
  12123. *
  12124. * @param [in] b SP integer that is the base.
  12125. * @param [in] e SP integer that is the exponent.
  12126. * @param [in] bits Number of bits in exponent to use. May be greater than
  12127. * count of bits in e.
  12128. * @param [in] m SP integer that is the modulus.
  12129. * @param [out] r SP integer to hold result.
  12130. *
  12131. * @return MP_OKAY on success.
  12132. * @return MP_VAL when b, e, m or r is NULL; or m <= 0 or e is negative.
  12133. * @return MP_MEM when dynamic memory allocation fails.
  12134. */
  12135. int sp_exptmod_ex(const sp_int* b, const sp_int* e, int digits, const sp_int* m,
  12136. sp_int* r)
  12137. {
  12138. int err = MP_OKAY;
  12139. int done = 0;
  12140. int mBits = sp_count_bits(m);
  12141. int bBits = sp_count_bits(b);
  12142. int eBits = sp_count_bits(e);
  12143. if ((b == NULL) || (e == NULL) || (m == NULL) || (r == NULL)) {
  12144. err = MP_VAL;
  12145. }
  12146. #if 0
  12147. if (err == MP_OKAY) {
  12148. sp_print(b, "a");
  12149. sp_print(e, "b");
  12150. sp_print(m, "m");
  12151. }
  12152. #endif
  12153. /* Check for invalid modulus. */
  12154. if ((err == MP_OKAY) && sp_iszero(m)) {
  12155. err = MP_VAL;
  12156. }
  12157. #ifdef WOLFSSL_SP_INT_NEGATIVE
  12158. /* Check for unsupported negative values of exponent and modulus. */
  12159. if ((err == MP_OKAY) && ((e->sign == MP_NEG) || (m->sign == MP_NEG))) {
  12160. err = MP_VAL;
  12161. }
  12162. #endif
  12163. /* Check for degenerate cases. */
  12164. if ((err == MP_OKAY) && sp_isone(m)) {
  12165. sp_set(r, 0);
  12166. done = 1;
  12167. }
  12168. if ((!done) && (err == MP_OKAY) && sp_iszero(e)) {
  12169. sp_set(r, 1);
  12170. done = 1;
  12171. }
  12172. /* Ensure base is less than modulus. */
  12173. if ((!done) && (err == MP_OKAY) && (_sp_cmp_abs(b, m) != MP_LT)) {
  12174. if ((r == e) || (r == m)) {
  12175. err = MP_VAL;
  12176. }
  12177. if (err == MP_OKAY) {
  12178. err = sp_mod(b, m, r);
  12179. }
  12180. if (err == MP_OKAY) {
  12181. b = r;
  12182. }
  12183. }
  12184. /* Check for degenerate case of base. */
  12185. if ((!done) && (err == MP_OKAY) && sp_iszero(b)) {
  12186. sp_set(r, 0);
  12187. done = 1;
  12188. }
  12189. /* Ensure SP integers have space for intermediate values. */
  12190. if ((!done) && (err == MP_OKAY) && (m->used * 2 >= r->size)) {
  12191. err = MP_VAL;
  12192. }
  12193. if ((!done) && (err == MP_OKAY)) {
  12194. /* Use code optimized for specific sizes if possible */
  12195. #if (defined(WOLFSSL_SP_MATH) || defined(WOLFSSL_SP_MATH_ALL)) && \
  12196. (defined(WOLFSSL_HAVE_SP_RSA) || defined(WOLFSSL_HAVE_SP_DH))
  12197. #ifndef WOLFSSL_SP_NO_2048
  12198. if ((mBits == 1024) && sp_isodd(m) && (bBits <= 1024) &&
  12199. (eBits <= 1024)) {
  12200. err = sp_ModExp_1024((sp_int*)b, (sp_int*)e, (sp_int*)m, r);
  12201. done = 1;
  12202. }
  12203. else if ((mBits == 2048) && sp_isodd(m) && (bBits <= 2048) &&
  12204. (eBits <= 2048)) {
  12205. err = sp_ModExp_2048((sp_int*)b, (sp_int*)e, (sp_int*)m, r);
  12206. done = 1;
  12207. }
  12208. else
  12209. #endif
  12210. #ifndef WOLFSSL_SP_NO_3072
  12211. if ((mBits == 1536) && sp_isodd(m) && (bBits <= 1536) &&
  12212. (eBits <= 1536)) {
  12213. err = sp_ModExp_1536((sp_int*)b, (sp_int*)e, (sp_int*)m, r);
  12214. done = 1;
  12215. }
  12216. else if ((mBits == 3072) && sp_isodd(m) && (bBits <= 3072) &&
  12217. (eBits <= 3072)) {
  12218. err = sp_ModExp_3072((sp_int*)b, (sp_int*)e, (sp_int*)m, r);
  12219. done = 1;
  12220. }
  12221. else
  12222. #endif
  12223. #ifdef WOLFSSL_SP_4096
  12224. if ((mBits == 4096) && sp_isodd(m) && (bBits <= 4096) &&
  12225. (eBits <= 4096)) {
  12226. err = sp_ModExp_4096((sp_int*)b, (sp_int*)e, (sp_int*)m, r);
  12227. done = 1;
  12228. }
  12229. else
  12230. #endif
  12231. #endif
  12232. {
  12233. /* SP does not support size. */
  12234. }
  12235. }
  12236. #if defined(WOLFSSL_SP_MATH_ALL) || !defined(NO_DH) || defined(OPENSSL_ALL)
  12237. #if (defined(WOLFSSL_RSA_VERIFY_ONLY) || defined(WOLFSSL_RSA_PUBLIC_ONLY)) && \
  12238. defined(NO_DH)
  12239. if ((!done) && (err == MP_OKAY))
  12240. /* Use non-constant time version - fastest. */
  12241. err = sp_exptmod_nct(b, e, m, r);
  12242. }
  12243. #else
  12244. #if defined(WOLFSSL_SP_MATH_ALL) || defined(OPENSSL_ALL)
  12245. if ((!done) && (err == MP_OKAY) && (b->used == 1) && (b->dp[0] == 2) &&
  12246. mp_isodd(m)) {
  12247. /* Use the generic base 2 implementation. */
  12248. err = _sp_exptmod_base_2(e, digits, m, r);
  12249. }
  12250. else if ((!done) && (err == MP_OKAY) && ((m->used > 1) && mp_isodd(m))) {
  12251. #ifndef WC_NO_HARDEN
  12252. /* Use constant time version hardened against timing attacks and
  12253. * cache attacks when WC_NO_CACHE_RESISTANT not defined. */
  12254. err = _sp_exptmod_mont_ex(b, e, digits * SP_WORD_SIZE, m, r);
  12255. #else
  12256. /* Use non-constant time version - fastest. */
  12257. err = sp_exptmod_nct(b, e, m, r);
  12258. #endif
  12259. }
  12260. else
  12261. #endif /* WOLFSSL_SP_MATH_ALL || OPENSSL_ALL */
  12262. if ((!done) && (err == MP_OKAY)) {
  12263. /* Otherwise use the generic implementation hardened against
  12264. * timing and cache attacks. */
  12265. err = _sp_exptmod_ex(b, e, digits * SP_WORD_SIZE, m, r);
  12266. }
  12267. #endif /* WOLFSSL_RSA_VERIFY_ONLY || WOLFSSL_RSA_PUBLIC_ONLY */
  12268. #else
  12269. if ((!done) && (err == MP_OKAY)) {
  12270. err = MP_VAL;
  12271. }
  12272. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH */
  12273. (void)mBits;
  12274. (void)bBits;
  12275. (void)eBits;
  12276. (void)digits;
  12277. #if 0
  12278. if (err == MP_OKAY) {
  12279. sp_print(r, "rme");
  12280. }
  12281. #endif
  12282. return err;
  12283. }
  12284. #endif
  12285. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  12286. !defined(NO_DH) || (!defined(NO_RSA) && defined(WOLFSSL_KEY_GEN)) || \
  12287. defined(OPENSSL_ALL)
  12288. /* Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  12289. *
  12290. * @param [in] b SP integer that is the base.
  12291. * @param [in] e SP integer that is the exponent.
  12292. * @param [in] m SP integer that is the modulus.
  12293. * @param [out] r SP integer to hold result.
  12294. *
  12295. * @return MP_OKAY on success.
  12296. * @return MP_VAL when b, e, m or r is NULL; or m <= 0 or e is negative.
  12297. * @return MP_MEM when dynamic memory allocation fails.
  12298. */
  12299. int sp_exptmod(const sp_int* b, const sp_int* e, const sp_int* m, sp_int* r)
  12300. {
  12301. int err = MP_OKAY;
  12302. /* Validate parameters. */
  12303. if ((b == NULL) || (e == NULL) || (m == NULL) || (r == NULL)) {
  12304. err = MP_VAL;
  12305. }
  12306. SAVE_VECTOR_REGISTERS(err = _svr_ret;);
  12307. if (err == MP_OKAY) {
  12308. err = sp_exptmod_ex(b, e, e->used, m, r);
  12309. }
  12310. RESTORE_VECTOR_REGISTERS();
  12311. return err;
  12312. }
  12313. #endif
  12314. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH)
  12315. #if defined(WOLFSSL_SP_FAST_NCT_EXPTMOD) || !defined(WOLFSSL_SP_SMALL)
  12316. /* Always allocate large array of sp_ints unless defined WOLFSSL_SP_NO_MALLOC */
  12317. #ifdef SP_ALLOC
  12318. #define SP_ALLOC_PREDEFINED
  12319. #else
  12320. #define SP_ALLOC
  12321. #endif
  12322. /* Internal. Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  12323. * Creates a window of precalculated exponents with base in Montgomery form.
  12324. * Sliding window and is NOT constant time.
  12325. *
  12326. * n-bit window is: (b^(2^(n-1))*b^0)...(b^(2^(n-1))*b^(2^(n-1)-1))
  12327. * e.g. when n=6, b^32..b^63
  12328. * Algorithm:
  12329. * 1. Ensure base is less than modulus.
  12330. * 2. Convert base to Montgomery form
  12331. * 3. Set result to table entry for top window bits, or
  12332. * if less than windows bits in exponent, 1 in Montgomery form.
  12333. * 4. While at least window bits left:
 * 4.1. Count and skip leading 0 bits unless fewer than window bits are
 *      left.
  12336. * 4.2. Montgomery square result for each leading 0 and window bits if bits
  12337. * left.
  12338. * 4.3. Break if less than window bits left.
 * 4.4. Get top window bits from exponent and drop.
  12340. * 4.5. Montgomery multiply result by table entry.
  12341. * 5. While bits left:
 * 5.1. Montgomery square result
  12343. * 5.2. If exponent bit set
  12344. * 5.2.1. Montgomery multiply result by Montgomery form of base.
  12345. * 6. Convert result back from Montgomery form.
  12346. *
  12347. * @param [in] b SP integer that is the base.
  12348. * @param [in] e SP integer that is the exponent.
  12349. * @param [in] bits Number of bits in exponent to use. May be greater than
  12350. * count of bits in e.
  12351. * @param [in] m SP integer that is the modulus.
  12352. * @param [out] r SP integer to hold result.
  12353. *
  12354. * @return MP_OKAY on success.
  12355. * @return MP_MEM when dynamic memory allocation fails.
  12356. */
  12357. static int _sp_exptmod_nct(const sp_int* b, const sp_int* e, const sp_int* m,
  12358. sp_int* r)
  12359. {
  12360. int i = 0;
  12361. int c = 0;
  12362. int y = 0;
  12363. int bits;
  12364. int winBits;
  12365. int preCnt;
  12366. int err = MP_OKAY;
  12367. int done = 0;
  12368. sp_int* tr = NULL;
  12369. sp_int* bm = NULL;
  12370. sp_int_digit mask;
  12371. /* Maximum winBits is 6 and preCnt is (1 << (winBits - 1)). */
  12372. DECL_SP_INT_ARRAY(t, m->used * 2 + 1, (1 << 5) + 2);
  12373. bits = sp_count_bits(e);
  12374. /* Window bits based on number of pre-calculations versus number of loop
  12375. * calculcations.
  12376. * Exponents for RSA and DH will result in 6-bit windows.
  12377. * Note: for 4096-bit values, 7-bit window is slightly better.
  12378. */
  12379. if (bits > 450) {
  12380. winBits = 6;
  12381. }
  12382. else if (bits <= 21) {
  12383. winBits = 1;
  12384. }
  12385. else if (bits <= 36) {
  12386. winBits = 3;
  12387. }
  12388. else if (bits <= 140) {
  12389. winBits = 4;
  12390. }
  12391. else {
  12392. winBits = 5;
  12393. }
  12394. /* Top bit of exponent fixed as 1 for pre-calculated window. */
  12395. preCnt = 1 << (winBits - 1);
  12396. /* Mask for calculating index into pre-computed table. */
  12397. mask = preCnt - 1;
  12398. /* Allocate sp_ints for:
  12399. * - pre-computation table
  12400. * - temporary result
  12401. * - Montgomery form of base
  12402. */
  12403. ALLOC_SP_INT_ARRAY(t, m->used * 2 + 1, preCnt + 2, err, NULL);
  12404. if (err == MP_OKAY) {
  12405. /* Set variables to use allocate memory. */
  12406. tr = t[preCnt + 0];
  12407. bm = t[preCnt + 1];
  12408. /* Iniitialize all allocated */
  12409. for (i = 0; i < preCnt; i++) {
  12410. _sp_init_size(t[i], m->used * 2 + 1);
  12411. }
  12412. _sp_init_size(tr, m->used * 2 + 1);
  12413. _sp_init_size(bm, m->used * 2 + 1);
  12414. /* 1. Ensure base is less than modulus. */
  12415. if (_sp_cmp_abs(b, m) != MP_LT) {
  12416. err = sp_mod(b, m, bm);
  12417. /* Handle base == modulus. */
  12418. if ((err == MP_OKAY) && sp_iszero(bm)) {
  12419. sp_set(r, 0);
  12420. done = 1;
  12421. }
  12422. }
  12423. else {
  12424. /* Copy base into Montogmery base variable. */
  12425. err = sp_copy(b, bm);
  12426. }
  12427. }
  12428. if ((!done) && (err == MP_OKAY)) {
  12429. sp_int_digit mp;
  12430. sp_int_digit n;
  12431. /* Calculate Montgomery multiplier for reduction. */
  12432. _sp_mont_setup(m, &mp);
  12433. /* Calculate Montgomery normalizer for modulus. */
  12434. err = sp_mont_norm(t[0], m);
  12435. if (err == MP_OKAY) {
  12436. /* 2. Convert base to Montgomery form. */
  12437. err = sp_mulmod(bm, t[0], m, bm);
  12438. }
  12439. if (err == MP_OKAY) {
  12440. /* Copy Montgomery form of base into first element of table. */
  12441. err = sp_copy(bm, t[0]);
  12442. }
  12443. /* Calculate b^(2^(winBits-1)) */
  12444. for (i = 1; (i < winBits) && (err == MP_OKAY); i++) {
  12445. err = sp_sqr(t[0], t[0]);
  12446. if (err == MP_OKAY) {
  12447. err = _sp_mont_red(t[0], m, mp);
  12448. }
  12449. }
  12450. /* For each table entry after first. */
  12451. for (i = 1; (i < preCnt) && (err == MP_OKAY); i++) {
  12452. /* Multiply previous entry by the base in Mont form into table. */
  12453. err = sp_mul(t[i-1], bm, t[i]);
  12454. if (err == MP_OKAY) {
  12455. err = _sp_mont_red(t[i], m, mp);
  12456. }
  12457. }
  12458. /* 3. Set result to table entry for top window bits, or
  12459. * if less than windows bits in exponent, 1 in Montgomery form.
  12460. */
  12461. if (err == MP_OKAY) {
  12462. /* Find the top bit. */
  12463. i = (bits - 1) >> SP_WORD_SHIFT;
  12464. n = e->dp[i--];
  12465. c = bits % SP_WORD_SIZE;
  12466. if (c == 0) {
  12467. c = SP_WORD_SIZE;
  12468. }
  12469. /* Put top bit at highest offset in digit. */
  12470. n <<= SP_WORD_SIZE - c;
  12471. if (bits >= winBits) {
  12472. /* Top bit set. Copy from window. */
  12473. if (c < winBits) {
  12474. /* Bits to end of digit and part of next */
  12475. y = (int)((n >> (SP_WORD_SIZE - winBits)) & mask);
  12476. n = e->dp[i--];
  12477. c = winBits - c;
  12478. y |= (int)(n >> (SP_WORD_SIZE - c));
  12479. n <<= c;
  12480. c = SP_WORD_SIZE - c;
  12481. }
  12482. else {
  12483. /* Bits from middle of digit */
  12484. y = (int)((n >> (SP_WORD_SIZE - winBits)) & mask);
  12485. n <<= winBits;
  12486. c -= winBits;
  12487. }
  12488. err = sp_copy(t[y], tr);
  12489. }
  12490. else {
  12491. /* 1 in Montgomery form. */
  12492. err = sp_mont_norm(tr, m);
  12493. }
  12494. /* 4. While at least window bits left. */
  12495. while ((err == MP_OKAY) && ((i >= 0) || (c >= winBits))) {
  12496. /* Number of squares to before due to top bits being 0. */
  12497. int sqrs = 0;
  12498. /* 4.1. Count number of and skip leading 0 bits unless less
  12499. * than window bits.
  12500. */
  12501. do {
  12502. /* Make sure n has bits from the right digit. */
  12503. if (c == 0) {
  12504. n = e->dp[i--];
  12505. c = SP_WORD_SIZE;
  12506. }
  12507. /* Mask off the next bit. */
  12508. if ((n & ((sp_int_digit)1 << (SP_WORD_SIZE - 1))) != 0) {
  12509. break;
  12510. }
  12511. /* Another square needed. */
  12512. sqrs++;
  12513. /* Skip bit. */
  12514. n <<= 1;
  12515. c--;
  12516. }
  12517. while ((err == MP_OKAY) && ((i >= 0) || (c >= winBits)));
  12518. if ((err == MP_OKAY) && ((i >= 0) || (c >= winBits))) {
  12519. /* Add squares needed before using table entry. */
  12520. sqrs += winBits;
  12521. }
  12522. /* 4.2. Montgomery square result for each leading 0 and window
  12523. * bits if bits left.
  12524. */
  12525. for (; (err == MP_OKAY) && (sqrs > 0); sqrs--) {
  12526. err = sp_sqr(tr, tr);
  12527. if (err == MP_OKAY) {
  12528. err = _sp_mont_red(tr, m, mp);
  12529. }
  12530. }
  12531. /* 4.3. Break if less than window bits left. */
  12532. if ((err == MP_OKAY) && (i < 0) && (c < winBits)) {
  12533. break;
  12534. }
  12535. /* 4.4. Get top window bits from expononent and drop. */
  12536. if (err == MP_OKAY) {
  12537. if (c == 0) {
  12538. /* Bits from next digit. */
  12539. n = e->dp[i--];
  12540. y = (int)(n >> (SP_WORD_SIZE - winBits));
  12541. n <<= winBits;
  12542. c = SP_WORD_SIZE - winBits;
  12543. }
  12544. else if (c < winBits) {
  12545. /* Bits to end of digit and part of next. */
  12546. y = (int)(n >> (SP_WORD_SIZE - winBits));
  12547. n = e->dp[i--];
  12548. c = winBits - c;
  12549. y |= (int)(n >> (SP_WORD_SIZE - c));
  12550. n <<= c;
  12551. c = SP_WORD_SIZE - c;
  12552. }
  12553. else {
  12554. /* Bits from middle of digit. */
  12555. y = (int)(n >> (SP_WORD_SIZE - winBits));
  12556. n <<= winBits;
  12557. c -= winBits;
  12558. }
  12559. y &= mask;
  12560. }
  12561. /* 4.5. Montgomery multiply result by table entry. */
  12562. if (err == MP_OKAY) {
  12563. err = sp_mul(tr, t[y], tr);
  12564. }
  12565. if (err == MP_OKAY) {
  12566. err = _sp_mont_red(tr, m, mp);
  12567. }
  12568. }
  12569. /* Finished multiplying in table entries. */
  12570. if ((err == MP_OKAY) && (c > 0)) {
  12571. /* Handle remaining bits.
  12572. * Window values have top bit set and can't be used. */
  12573. n = e->dp[0];
  12574. /* 5. While bits left: */
  12575. for (--c; (err == MP_OKAY) && (c >= 0); c--) {
  12576. /* 5.1. Montogmery square result */
  12577. err = sp_sqr(tr, tr);
  12578. if (err == MP_OKAY) {
  12579. err = _sp_mont_red(tr, m, mp);
  12580. }
  12581. /* 5.2. If exponent bit set */
  12582. if ((err == MP_OKAY) && ((n >> c) & 1)) {
  12583. /* 5.2.1. Montgomery multiply result by Montgomery form
  12584. * of base.
  12585. */
  12586. err = sp_mul(tr, bm, tr);
  12587. if (err == MP_OKAY) {
  12588. err = _sp_mont_red(tr, m, mp);
  12589. }
  12590. }
  12591. }
  12592. }
  12593. }
  12594. if (err == MP_OKAY) {
  12595. /* 6. Convert result back from Montgomery form. */
  12596. err = _sp_mont_red(tr, m, mp);
  12597. /* Reduction implementation returns number to range: 0..m-1. */
  12598. }
  12599. }
  12600. if ((!done) && (err == MP_OKAY)) {
  12601. /* Copy temporary result into parameter. */
  12602. err = sp_copy(tr, r);
  12603. }
  12604. FREE_SP_INT_ARRAY(t, NULL);
  12605. return err;
  12606. }
  12607. #ifndef SP_ALLOC_PREDEFINED
  12608. #undef SP_ALLOC
  12609. #undef SP_ALLOC_PREDEFINED
  12610. #endif
  12611. #else
  12612. /* Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  12613. * Non-constant time implementation.
  12614. *
  12615. * Algorithm:
  12616. * 1. Convert base to Montgomery form
  12617. * 2. Set result to base (assumes exponent is not zero)
  12618. * 3. For each bit in exponent starting at second highest
 * 3.1. Montgomery square result
  12620. * 3.2. If exponent bit set
  12621. * 3.2.1. Montgomery multiply result by Montgomery form of base.
  12622. * 4. Convert result back from Montgomery form.
  12623. *
  12624. * @param [in] b SP integer that is the base.
  12625. * @param [in] e SP integer that is the exponent.
  12626. * @param [in] m SP integer that is the modulus.
  12627. * @param [out] r SP integer to hold result.
  12628. *
  12629. * @return MP_OKAY on success.
  12630. * @return MP_VAL when b, e, m or r is NULL; or m <= 0 or e is negative.
  12631. * @return MP_MEM when dynamic memory allocation fails.
  12632. */
  12633. static int _sp_exptmod_nct(const sp_int* b, const sp_int* e, const sp_int* m,
  12634. sp_int* r)
  12635. {
  12636. int i;
  12637. int err = MP_OKAY;
  12638. int done = 0;
  12639. int y = 0;
  12640. int bits = sp_count_bits(e);
  12641. sp_int_digit mp;
  12642. DECL_SP_INT_ARRAY(t, m->used * 2 + 1, 2);
  12643. /* Allocate memory for:
  12644. * - Montgomery form of base
  12645. * - Temporary result (in case r is same var as another parameter). */
  12646. ALLOC_SP_INT_ARRAY(t, m->used * 2 + 1, 2, err, NULL);
  12647. if (err == MP_OKAY) {
  12648. _sp_init_size(t[0], m->used * 2 + 1);
  12649. _sp_init_size(t[1], m->used * 2 + 1);
  12650. /* Ensure base is less than modulus and copy into temp. */
  12651. if (_sp_cmp_abs(b, m) != MP_LT) {
  12652. err = sp_mod(b, m, t[0]);
  12653. /* Handle base == modulus. */
  12654. if ((err == MP_OKAY) && sp_iszero(t[0])) {
  12655. sp_set(r, 0);
  12656. done = 1;
  12657. }
  12658. }
  12659. else {
  12660. /* Copy base into temp. */
  12661. err = sp_copy(b, t[0]);
  12662. }
  12663. }
  12664. if ((!done) && (err == MP_OKAY)) {
  12665. /* Calculate Montgomery multiplier for reduction. */
  12666. _sp_mont_setup(m, &mp);
  12667. /* Calculate Montgomery normalizer for modulus. */
  12668. err = sp_mont_norm(t[1], m);
  12669. if (err == MP_OKAY) {
  12670. /* 1. Convert base to Montgomery form. */
  12671. err = sp_mulmod(t[0], t[1], m, t[0]);
  12672. }
  12673. if (err == MP_OKAY) {
  12674. /* 2. Result starts as Montgomery form of base (assuming e > 0). */
  12675. sp_copy(t[0], t[1]);
  12676. }
  12677. /* 3. For each bit in exponent starting at second highest. */
  12678. for (i = bits - 2; (err == MP_OKAY) && (i >= 0); i--) {
  12679. /* 3.1. Montgomery square result. */
  12680. err = sp_sqr(t[0], t[0]);
  12681. if (err == MP_OKAY) {
  12682. err = _sp_mont_red(t[0], m, mp);
  12683. }
  12684. if (err == MP_OKAY) {
  12685. /* Get bit and index i. */
  12686. y = (e->dp[i >> SP_WORD_SHIFT] >> (i & SP_WORD_MASK)) & 1;
  12687. /* 3.2. If exponent bit set */
  12688. if (y != 0) {
  12689. /* 3.2.1. Montgomery multiply result by Mont of base. */
  12690. err = sp_mul(t[0], t[1], t[0]);
  12691. if (err == MP_OKAY) {
  12692. err = _sp_mont_red(t[0], m, mp);
  12693. }
  12694. }
  12695. }
  12696. }
  12697. if (err == MP_OKAY) {
  12698. /* 4. Convert from Montgomery form. */
  12699. err = _sp_mont_red(t[0], m, mp);
  12700. /* Reduction implementation returns number of range 0..m-1. */
  12701. }
  12702. }
  12703. if ((!done) && (err == MP_OKAY)) {
  12704. /* Copy temporary result into parameter. */
  12705. err = sp_copy(t[0], r);
  12706. }
  12707. FREE_SP_INT_ARRAY(t, NULL);
  12708. return err;
  12709. }
  12710. #endif /* WOLFSSL_SP_FAST_NCT_EXPTMOD || !WOLFSSL_SP_SMALL */
  12711. /* Exponentiates b to the power of e modulo m into r: r = b ^ e mod m
  12712. * Non-constant time implementation.
  12713. *
  12714. * @param [in] b SP integer that is the base.
  12715. * @param [in] e SP integer that is the exponent.
  12716. * @param [in] m SP integer that is the modulus.
  12717. * @param [out] r SP integer to hold result.
  12718. *
  12719. * @return MP_OKAY on success.
  12720. * @return MP_VAL when b, e, m or r is NULL; or m <= 0 or e is negative.
  12721. * @return MP_MEM when dynamic memory allocation fails.
  12722. */
int sp_exptmod_nct(const sp_int* b, const sp_int* e, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((b == NULL) || (e == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }

#if 0
    /* Debug output of operands (disabled). */
    if (err == MP_OKAY) {
        sp_print(b, "a");
        sp_print(e, "b");
        sp_print(m, "m");
    }
#endif

    /* Parameter check failed - fall through to return error. */
    if (err != MP_OKAY) {
    }
    /* Handle special cases. */
    else if (sp_iszero(m)) {
        /* Modulus of zero is invalid. */
        err = MP_VAL;
    }
#ifdef WOLFSSL_SP_INT_NEGATIVE
    else if ((e->sign == MP_NEG) || (m->sign == MP_NEG)) {
        /* Negative exponents and negative moduli are not supported. */
        err = MP_VAL;
    }
#endif
    else if (sp_isone(m)) {
        /* x mod 1 == 0 for any x. */
        sp_set(r, 0);
    }
    else if (sp_iszero(e)) {
        /* x ^ 0 == 1 (m > 1 at this point). */
        sp_set(r, 1);
    }
    else if (sp_iszero(b)) {
        /* 0 ^ x == 0 (e > 0 at this point). */
        sp_set(r, 0);
    }
    /* Ensure SP integers have space for intermediate values. */
    else if (m->used * 2 >= r->size) {
        err = MP_VAL;
    }
#if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_PUBLIC_ONLY)
    else if (mp_iseven(m)) {
        /* Montgomery reduction needs an odd modulus; fall back to the
         * generic implementation for even moduli. */
        err = _sp_exptmod_ex(b, e, e->used * SP_WORD_SIZE, m, r);
    }
#endif
    else {
        /* Odd modulus: non-constant time Montgomery implementation. */
        err = _sp_exptmod_nct(b, e, m, r);
    }

#if 0
    /* Debug output of result (disabled). */
    if (err == MP_OKAY) {
        sp_print(r, "rme");
    }
#endif

    return err;
}
  12775. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH */
  12776. /***************
  12777. * 2^e functions
  12778. ***************/
  12779. #if defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)
  12780. /* Divide by 2^e: r = a >> e and rem = bits shifted out
  12781. *
  12782. * @param [in] a SP integer to divide.
  12783. * @param [in] e Exponent bits (dividing by 2^e).
  12784. * @param [in] m SP integer that is the modulus.
  12785. * @param [out] r SP integer to hold result.
  12786. * @param [out] rem SP integer to hold remainder.
  12787. *
  12788. * @return MP_OKAY on success.
  12789. * @return MP_VAL when a is NULL.
  12790. */
  12791. int sp_div_2d(const sp_int* a, int e, sp_int* r, sp_int* rem)
  12792. {
  12793. int err = MP_OKAY;
  12794. if (a == NULL) {
  12795. err = MP_VAL;
  12796. }
  12797. if (err == MP_OKAY) {
  12798. /* Number of bits remaining after shift. */
  12799. int remBits = sp_count_bits(a) - e;
  12800. if (remBits <= 0) {
  12801. /* Shifting down by more bits than in number. */
  12802. _sp_zero(r);
  12803. sp_copy(a, rem);
  12804. }
  12805. else {
  12806. if (rem != NULL) {
  12807. /* Copy a in to remainder. */
  12808. err = sp_copy(a, rem);
  12809. }
  12810. if (err == MP_OKAY) {
  12811. /* Shift a down by into result. */
  12812. err = sp_rshb(a, e, r);
  12813. }
  12814. if ((err == MP_OKAY) && (rem != NULL)) {
  12815. /* Set used and mask off top digit of remainder. */
  12816. rem->used = (e + SP_WORD_SIZE - 1) >> SP_WORD_SHIFT;
  12817. e &= SP_WORD_MASK;
  12818. if (e > 0) {
  12819. rem->dp[rem->used - 1] &= ((sp_int_digit)1 << e) - 1;
  12820. }
  12821. /* Remove leading zeros from remainder. */
  12822. sp_clamp(rem);
  12823. #ifdef WOLFSSL_SP_INT_NEGATIVE
  12824. rem->sign = MP_ZPOS;
  12825. #endif
  12826. }
  12827. }
  12828. }
  12829. return err;
  12830. }
  12831. #endif /* WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY */
  12832. #if defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)
  12833. /* The bottom e bits: r = a & ((1 << e) - 1)
  12834. *
  12835. * @param [in] a SP integer to reduce.
  12836. * @param [in] e Modulus bits (modulus equals 2^e).
  12837. * @param [out] r SP integer to hold result.
  12838. *
  12839. * @return MP_OKAY on success.
  12840. * @return MP_VAL when a or r is NULL.
  12841. */
int sp_mod_2d(const sp_int* a, int e, sp_int* r)
{
    int err = MP_OKAY;
    /* Number of digits needed to hold e bits. */
    int digits = (e + SP_WORD_SIZE - 1) >> SP_WORD_SHIFT;

    /* Validate parameters. */
    if ((a == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Result must have space for all digits of the masked value. */
    if ((err == MP_OKAY) && (digits > r->size)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
        /* Copy a into r if not same pointer. */
        if (a != r) {
            /* NOTE(review): copies 'digits' words even when a->used is
             * smaller; assumes a->dp has at least 'digits' digits allocated
             * (a->size is not checked here) - confirm against callers. */
            XMEMCPY(r->dp, a->dp, digits * SP_WORD_SIZEOF);
            r->used = a->used;
#ifdef WOLFSSL_SP_INT_NEGATIVE
            r->sign = a->sign;
#endif
        }
        /* Modify result if a is bigger or same digit size. */
#ifndef WOLFSSL_SP_INT_NEGATIVE
        if (digits <= a->used)
#else
        /* Need to make negative positive and mask. */
        if ((a->sign == MP_NEG) || (digits <= a->used))
#endif
        {
#ifdef WOLFSSL_SP_INT_NEGATIVE
            if (a->sign == MP_NEG) {
                int i;
                sp_int_digit carry = 0;

                /* Negate value: two's complement over the copied digits. */
                for (i = 0; i < r->used; i++) {
                    /* Once any non-zero digit has been seen, a borrow is
                     * propagated into every subsequent digit. */
                    sp_int_digit next = r->dp[i] > 0;
                    r->dp[i] = (sp_int_digit)0 - r->dp[i] - carry;
                    carry |= next;
                }
                /* Extend the complement through remaining digits. */
                for (; i < digits; i++) {
                    r->dp[i] = (sp_int_digit)0 - carry;
                }
                /* Result of masking is always non-negative. */
                r->sign = MP_ZPOS;
            }
#endif
            /* Set used and mask off top digit of result. */
            r->used = digits;
            e &= SP_WORD_MASK;
            if (e > 0) {
                r->dp[r->used - 1] &= ((sp_int_digit)1 << e) - 1;
            }
            /* Remove leading zeros from result. */
            sp_clamp(r);
        }
    }

    return err;
}
  12896. #endif /* WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY */
  12897. #if (defined(WOLFSSL_SP_MATH_ALL) && (!defined(WOLFSSL_RSA_VERIFY_ONLY) || \
  12898. !defined(NO_DH))) || defined(OPENSSL_ALL)
  12899. /* Multiply by 2^e: r = a << e
  12900. *
  12901. * @param [in] a SP integer to multiply.
  12902. * @param [in] e Multiplier bits (multiplier equals 2^e).
  12903. * @param [out] r SP integer to hold result.
  12904. *
  12905. * @return MP_OKAY on success.
  12906. * @return MP_VAL when a or r is NULL, or result is too big for fixed data
  12907. * length.
  12908. */
  12909. int sp_mul_2d(const sp_int* a, int e, sp_int* r)
  12910. {
  12911. int err = MP_OKAY;
  12912. /* Validate parameters. */
  12913. if ((a == NULL) || (r == NULL)) {
  12914. err = MP_VAL;
  12915. }
  12916. /* Ensure result has enough allocated digits for result. */
  12917. if ((err == MP_OKAY) && (sp_count_bits(a) + e > r->size * SP_WORD_SIZE)) {
  12918. err = MP_VAL;
  12919. }
  12920. if (err == MP_OKAY) {
  12921. /* Copy a into r as left shift function works on the number. */
  12922. if (a != r) {
  12923. err = sp_copy(a, r);
  12924. }
  12925. }
  12926. if (err == MP_OKAY) {
  12927. #if 0
  12928. sp_print(a, "a");
  12929. sp_print_int(e, "n");
  12930. #endif
  12931. err = sp_lshb(r, e);
  12932. #if 0
  12933. sp_print(r, "rsl");
  12934. #endif
  12935. }
  12936. return err;
  12937. }
  12938. #endif /* WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY */
  12939. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
  12940. defined(HAVE_ECC) || (!defined(NO_RSA) && !defined(WOLFSSL_RSA_VERIFY_ONLY))
  12941. /* START SP_SQR implementations */
  12942. /* This code is generated.
  12943. * To generate:
  12944. * cd scripts/sp/sp_int
  12945. * ./gen.sh
  12946. * File sp_sqr.c contains code.
  12947. */
  12948. #if !defined(WOLFSSL_SP_MATH) || !defined(WOLFSSL_SP_SMALL)
  12949. #ifdef SQR_MUL_ASM
  12950. /* Square a and store in r. r = a * a
  12951. *
  12952. * @param [in] a SP integer to square.
  12953. * @param [out] r SP integer result.
  12954. *
  12955. * @return MP_OKAY on success.
  12956. * @return MP_MEM when dynamic memory allocation fails.
  12957. */
static int _sp_sqr(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    int j;
    int k;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Working digits allocated from the heap. */
    sp_int_digit* t = NULL;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
    /* C99: variable-length array on the stack sized to the result. */
    sp_int_digit t[a->used * 2];
#else
    /* Fixed maximum-size stack buffer. */
    sp_int_digit t[SP_INT_DIGITS];
#endif

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* A square of n digits needs 2*n digits of workspace. */
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * (a->used * 2), NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if ((err == MP_OKAY) && (a->used <= 1)) {
        /* Single digit: one squaring produces the whole result. */
        sp_int_digit l;
        sp_int_digit h;

        h = 0;
        l = 0;
        SP_ASM_SQR(h, l, a->dp[0]);
        t[0] = h;
        t[1] = l;
    }
    else if (err == MP_OKAY) {
        /* Comba squaring using assembly macros: (l, h, o) is a three-digit
         * column accumulator; SP_ASM_MUL_ADD2 adds each cross product twice
         * so only unique (i, j) pairs are visited. */
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o;

        /* Column 0: square of the least significant digit. */
        h = 0;
        l = 0;
        SP_ASM_SQR(h, l, a->dp[0]);
        t[0] = h;
        h = 0;
        o = 0;
        /* Lower half of columns: j runs down to 0 within bounds. */
        for (k = 1; k < (a->used + 1) / 2; k++) {
            /* Odd column 2k-1: cross products (k, k-1), (k+1, k-2), ... */
            i = k;
            j = k - 1;
            for (; (j >= 0); i++, j--) {
                SP_ASM_MUL_ADD2(l, h, o, a->dp[i], a->dp[j]);
            }
            /* Emit column digit and shift accumulator down one word. */
            t[k * 2 - 1] = l;
            l = h;
            h = o;
            o = 0;

            /* Even column 2k: diagonal a[k]^2 plus surrounding pairs. */
            SP_ASM_SQR_ADD(l, h, o, a->dp[k]);
            i = k + 1;
            j = k - 1;
            for (; (j >= 0); i++, j--) {
                SP_ASM_MUL_ADD2(l, h, o, a->dp[i], a->dp[j]);
            }
            t[k * 2] = l;
            l = h;
            h = o;
            o = 0;
        }
        /* Upper half of columns: i is bounded by a->used instead. */
        for (; k < a->used; k++) {
            i = k;
            j = k - 1;
            for (; (i < a->used); i++, j--) {
                SP_ASM_MUL_ADD2(l, h, o, a->dp[i], a->dp[j]);
            }
            t[k * 2 - 1] = l;
            l = h;
            h = o;
            o = 0;

            SP_ASM_SQR_ADD(l, h, o, a->dp[k]);
            i = k + 1;
            j = k - 1;
            for (; (i < a->used); i++, j--) {
                SP_ASM_MUL_ADD2(l, h, o, a->dp[i], a->dp[j]);
            }
            t[k * 2] = l;
            l = h;
            h = o;
            o = 0;
        }
        /* Top digit of the result. */
        t[k * 2 - 1] = l;
    }

    if (err == MP_OKAY) {
        /* Copy workspace into the result and trim leading zeros. */
        r->used = a->used * 2;
        XMEMCPY(r->dp, t, r->used * sizeof(sp_int_digit));
        sp_clamp(r);
    }

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  13054. #else /* !SQR_MUL_ASM */
  13055. /* Square a and store in r. r = a * a
  13056. *
  13057. * @param [in] a SP integer to square.
  13058. * @param [out] r SP integer result.
  13059. *
  13060. * @return MP_OKAY on success.
  13061. * @return MP_MEM when dynamic memory allocation fails.
  13062. */
static int _sp_sqr(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    int j;
    int k;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Working digits allocated from the heap. */
    sp_int_digit* t = NULL;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
    defined(WOLFSSL_SP_SMALL) && !defined(WOLFSSL_SP_NO_DYN_STACK)
    /* C99: variable-length array on the stack sized to the result. */
    sp_int_digit t[a->used * 2];
#else
    /* Fixed maximum-size stack buffer. */
    sp_int_digit t[SP_INT_DIGITS];
#endif

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* A square of n digits needs 2*n digits of workspace. */
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * (a->used * 2), NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        /* w: current double-width digit product; (l, h) and optionally o
         * form the multi-word column accumulator. */
        sp_int_word w;
        sp_int_word l;
        sp_int_word h;
#ifdef SP_WORD_OVERFLOW
        /* Third accumulator word for platforms where the repeated adds can
         * carry out of h. */
        sp_int_word o;
#endif

        /* Column 0: square of the least significant digit. */
        w = (sp_int_word)a->dp[0] * a->dp[0];
        t[0] = (sp_int_digit)w;
        l = (sp_int_digit)(w >> SP_WORD_SIZE);
        h = 0;
#ifdef SP_WORD_OVERFLOW
        o = 0;
#endif
        /* Columns 1 .. 2*(used-1): sum all products a[i]*a[j], i + j == k. */
        for (k = 1; k <= (a->used - 1) * 2; k++) {
            i = k / 2;
            j = k - i;
            /* Diagonal term a[i]^2 is added once (even columns only). */
            if (i == j) {
                w = (sp_int_word)a->dp[i] * a->dp[j];
                l += (sp_int_digit)w;
                h += (sp_int_digit)(w >> SP_WORD_SIZE);
#ifdef SP_WORD_OVERFLOW
                h += (sp_int_digit)(l >> SP_WORD_SIZE);
                l &= SP_MASK;
                o += (sp_int_digit)(h >> SP_WORD_SIZE);
                h &= SP_MASK;
#endif
            }
            /* Off-diagonal products contribute twice to the square. */
            for (++i, --j; (i < a->used) && (j >= 0); i++, j--) {
                w = (sp_int_word)a->dp[i] * a->dp[j];
                l += (sp_int_digit)w;
                h += (sp_int_digit)(w >> SP_WORD_SIZE);
#ifdef SP_WORD_OVERFLOW
                h += (sp_int_digit)(l >> SP_WORD_SIZE);
                l &= SP_MASK;
                o += (sp_int_digit)(h >> SP_WORD_SIZE);
                h &= SP_MASK;
#endif
                /* Second addition doubles the cross product. */
                l += (sp_int_digit)w;
                h += (sp_int_digit)(w >> SP_WORD_SIZE);
#ifdef SP_WORD_OVERFLOW
                h += (sp_int_digit)(l >> SP_WORD_SIZE);
                l &= SP_MASK;
                o += (sp_int_digit)(h >> SP_WORD_SIZE);
                h &= SP_MASK;
#endif
            }
            /* Store column digit and shift the accumulator down a word. */
            t[k] = (sp_int_digit)l;
            l >>= SP_WORD_SIZE;
            l += (sp_int_digit)h;
            h >>= SP_WORD_SIZE;
#ifdef SP_WORD_OVERFLOW
            h += o & SP_MASK;
            o >>= SP_WORD_SIZE;
#endif
        }
        /* Top digit of the result, then copy out and trim zeros. */
        t[k] = (sp_int_digit)l;
        r->used = k + 1;
        XMEMCPY(r->dp, t, r->used * sizeof(sp_int_digit));
        sp_clamp(r);
    }
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  13152. #endif /* SQR_MUL_ASM */
  13153. #endif /* !WOLFSSL_SP_MATH || !WOLFSSL_SP_SMALL */
  13154. #ifndef WOLFSSL_SP_SMALL
  13155. #if !defined(WOLFSSL_HAVE_SP_ECC) && defined(HAVE_ECC)
  13156. #if SP_WORD_SIZE == 64
  13157. #ifndef SQR_MUL_ASM
  13158. /* Square a and store in r. r = a * a
  13159. *
  13160. * Long-hand implementation.
  13161. *
  13162. * @param [in] a SP integer to square.
  13163. * @param [out] r SP integer result.
  13164. *
  13165. * @return MP_OKAY on success.
  13166. * @return MP_MEM when dynamic memory allocation fails.
  13167. */
static int _sp_sqr_4(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    /* Partial products allocated from the heap. */
    sp_int_word* w = NULL;
#else
    /* One double-width word per unique digit product of a 4x4 square. */
    sp_int_word w[10];
#endif
    const sp_int_digit* da = a->dp;

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    w = (sp_int_word*)XMALLOC(sizeof(sp_int_word) * 10, NULL,
        DYNAMIC_TYPE_BIGINT);
    if (w == NULL) {
        err = MP_MEM;
    }
#endif

    if (err == MP_OKAY) {
        /* All unique digit products; each cross product is added twice
         * below rather than stored twice. */
        w[0] = (sp_int_word)da[0] * da[0];
        w[1] = (sp_int_word)da[0] * da[1];
        w[2] = (sp_int_word)da[0] * da[2];
        w[3] = (sp_int_word)da[1] * da[1];
        w[4] = (sp_int_word)da[0] * da[3];
        w[5] = (sp_int_word)da[1] * da[2];
        w[6] = (sp_int_word)da[1] * da[3];
        w[7] = (sp_int_word)da[2] * da[2];
        w[8] = (sp_int_word)da[2] * da[3];
        w[9] = (sp_int_word)da[3] * da[3];

        /* w[0] doubles as the running accumulator: after each result digit
         * is stored, it is shifted down one word and the next column's
         * low/high halves are added in. */
        r->dp[0] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 1: 2*a[0]*a[1]. */
        w[0] += (sp_int_digit)w[1];
        w[0] += (sp_int_digit)w[1];
        r->dp[1] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 2: carries + 2*a[0]*a[2] + a[1]^2. */
        w[1] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[1];
        w[0] += (sp_int_digit)w[1];
        w[0] += (sp_int_digit)w[2];
        w[0] += (sp_int_digit)w[2];
        w[0] += (sp_int_digit)w[3];
        r->dp[2] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 3: carries + 2*a[0]*a[3] + 2*a[1]*a[2]. */
        w[2] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[2];
        w[0] += (sp_int_digit)w[2];
        w[3] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[3];
        w[0] += (sp_int_digit)w[4];
        w[0] += (sp_int_digit)w[4];
        w[0] += (sp_int_digit)w[5];
        w[0] += (sp_int_digit)w[5];
        r->dp[3] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 4: carries + 2*a[1]*a[3] + a[2]^2. */
        w[4] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[4];
        w[0] += (sp_int_digit)w[4];
        w[5] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[5];
        w[0] += (sp_int_digit)w[5];
        w[0] += (sp_int_digit)w[6];
        w[0] += (sp_int_digit)w[6];
        w[0] += (sp_int_digit)w[7];
        r->dp[4] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 5: carries + 2*a[2]*a[3]. */
        w[6] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[6];
        w[0] += (sp_int_digit)w[6];
        w[7] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[7];
        w[0] += (sp_int_digit)w[8];
        w[0] += (sp_int_digit)w[8];
        r->dp[5] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 6: carries + a[3]^2. */
        w[8] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[8];
        w[0] += (sp_int_digit)w[8];
        w[0] += (sp_int_digit)w[9];
        r->dp[6] = w[0];
        w[0] >>= SP_WORD_SIZE;
        /* Column 7: final carry + high word of a[3]^2. */
        w[9] >>= SP_WORD_SIZE;
        w[0] += (sp_int_digit)w[9];
        r->dp[7] = w[0];
        /* 4 digits squared gives at most 8 digits; trim leading zeros. */
        r->used = 8;
        sp_clamp(r);
    }

#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (w != NULL) {
        XFREE(w, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  13259. #else /* SQR_MUL_ASM */
  13260. /* Square a and store in r. r = a * a
  13261. *
  13262. * Comba implementation.
  13263. *
  13264. * @param [in] a SP integer to square.
  13265. * @param [out] r SP integer result.
  13266. *
  13267. * @return MP_OKAY on success.
  13268. * @return MP_MEM when dynamic memory allocation fails.
  13269. */
static int _sp_sqr_4(const sp_int* a, sp_int* r)
{
    /* (l, h, o) is a three-digit column accumulator. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* Low four result digits saved here so that r may alias a. */
    sp_int_digit t[4];

    /* Column 0: a[0]^2. */
    SP_ASM_SQR(h, l, a->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1: 2*a[0]*a[1]. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2: 2*a[0]*a[2] + a[1]^2. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3: 2*a[0]*a[3] + 2*a[1]*a[2]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4: 2*a[1]*a[3] + a[2]^2 - from here on a's low digits are no
     * longer read, so writing r directly is safe even when r == a. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
    r->dp[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5: 2*a[2]*a[3]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[2], a->dp[3]);
    r->dp[5] = l;
    l = h;
    h = o;
    /* Columns 6 and 7: a[3]^2 plus remaining carries. */
    SP_ASM_SQR_ADD_NO(l, h, a->dp[3]);
    r->dp[6] = l;
    r->dp[7] = h;
    /* Copy the saved low digits into the result. */
    XMEMCPY(r->dp, t, 4 * sizeof(sp_int_digit));
    r->used = 8;
    sp_clamp(r);
    return MP_OKAY;
}
  13314. #endif /* SQR_MUL_ASM */
  13315. #endif /* SP_WORD_SIZE == 64 */
  13316. #if SP_WORD_SIZE == 64
  13317. #ifdef SQR_MUL_ASM
  13318. /* Square a and store in r. r = a * a
  13319. *
  13320. * Comba implementation.
  13321. *
  13322. * @param [in] a SP integer to square.
  13323. * @param [out] r SP integer result.
  13324. *
  13325. * @return MP_OKAY on success.
  13326. * @return MP_MEM when dynamic memory allocation fails.
  13327. */
static int _sp_sqr_6(const sp_int* a, sp_int* r)
{
    /* (l, h, o) is a three-digit column accumulator. */
    sp_int_digit l = 0;
    sp_int_digit h = 0;
    sp_int_digit o = 0;
    /* (tl, th, to) accumulates single cross products which are then added
     * twice into the column via SP_ASM_ADD_DBL_3. */
    sp_int_digit tl = 0;
    sp_int_digit th = 0;
    sp_int_digit to;
    /* Low six result digits saved here so that r may alias a. */
    sp_int_digit t[6];

#if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
    /* Thumb macros do not initialize 'to' themselves. */
    to = 0;
#endif

    /* Column 0: a[0]^2. */
    SP_ASM_SQR(h, l, a->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1: 2*a[0]*a[1]. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2: 2*a[0]*a[2] + a[1]^2. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3: 2*a[0]*a[3] + 2*a[1]*a[2]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4: 2*a[0]*a[4] + 2*a[1]*a[3] + a[2]^2. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[4]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5: 2*(a[0]*a[5] + a[1]*a[4] + a[2]*a[3]) - sum once into
     * (tl, th, to), then double into the column. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[4]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[3]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6: 2*a[1]*a[5] + 2*a[2]*a[4] + a[3]^2 - from here on a's low
     * digits are no longer read, so writing r directly is safe. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[5]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[2], a->dp[4]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[3]);
    r->dp[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7: 2*a[2]*a[5] + 2*a[3]*a[4]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[2], a->dp[5]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[3], a->dp[4]);
    r->dp[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8: 2*a[3]*a[5] + a[4]^2. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[3], a->dp[5]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[4]);
    r->dp[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9: 2*a[4]*a[5]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[4], a->dp[5]);
    r->dp[9] = l;
    l = h;
    h = o;
    /* Columns 10 and 11: a[5]^2 plus remaining carries. */
    SP_ASM_SQR_ADD_NO(l, h, a->dp[5]);
    r->dp[10] = l;
    r->dp[11] = h;
    /* Copy the saved low digits into the result. */
    XMEMCPY(r->dp, t, 6 * sizeof(sp_int_digit));
    r->used = 12;
    sp_clamp(r);
    return MP_OKAY;
}
  13406. #endif /* SQR_MUL_ASM */
  13407. #endif /* SP_WORD_SIZE == 64 */
  13408. #if SP_WORD_SIZE == 32
  13409. #ifdef SQR_MUL_ASM
/* Square a and store in r. r = a * a
 *
 * Comba implementation: the 16 result digits (columns) are produced in
 * order.  Column k is the sum of all products a->dp[i] * a->dp[j] with
 * i + j == k, accumulated into the triple-word accumulator l:h:o
 * (low, high, overflow).  Cross products (i != j) occur twice each, so they
 * are either added with the doubling macros (SP_ASM_MUL_ADD2*) or summed
 * once into the sub-total tl:th:to and then doubled in with
 * SP_ASM_ADD_DBL_3; square terms (i == j) are added once via
 * SP_ASM_SQR_ADD*.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY always; this variant performs no dynamic allocation.
 */
static int _sp_sqr_8(const sp_int* a, sp_int* r)
{
    sp_int_digit l = 0;   /* Accumulator word 0 (low) of current column.  */
    sp_int_digit h = 0;   /* Accumulator word 1 (high).                   */
    sp_int_digit o = 0;   /* Accumulator word 2 (overflow/carry).         */
    sp_int_digit tl = 0;  /* Cross-product sub-total, low word.           */
    sp_int_digit th = 0;  /* Cross-product sub-total, high word.          */
    sp_int_digit to;      /* Cross-product sub-total, overflow word.      */
    sp_int_digit t[8];    /* Low half of the result is staged here so the
                           * call works when r aliases a; copied out last. */
#if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
    /* NOTE(review): the Thumb macros appear to read 'to' before first
     * setting it, so it is zeroed only for that target. */
    to = 0;
#endif
    /* Column 0: a[0]^2.  SP_ASM_SQR writes the low word into its first
     * argument (h here) and the high word into the second (l). */
    SP_ASM_SQR(h, l, a->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1: 2*a[0]*a[1]. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
    t[1] = l;
    l = h;  /* Shift the accumulator down one word for the next column. */
    h = o;
    o = 0;
    /* Column 2: 2*a[0]*a[2] + a[1]^2. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3: 2*(a[0]*a[3] + a[1]*a[2]). */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4: 2*(a[0]*a[4] + a[1]*a[3]) + a[2]^2. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[4]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5: sum a[0]*a[5] + a[1]*a[4] + a[2]*a[3] once into tl:th:to,
     * then double it into the accumulator. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[4]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[3]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6: 2*(a[0]*a[6] + a[1]*a[5] + a[2]*a[4]) + a[3]^2. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[4]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[3]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7: 2*(a[0]*a[7] + a[1]*a[6] + a[2]*a[5] + a[3]*a[4]). */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[4]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8: 2*(a[1]*a[7] + a[2]*a[6] + a[3]*a[5]) + a[4]^2.
     * From here the low digits of a are no longer read, so results are
     * written straight into r->dp. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[1], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[5]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[4]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9: 2*(a[2]*a[7] + a[3]*a[6] + a[4]*a[5]). */
    SP_ASM_MUL_SET(tl, th, to, a->dp[2], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[5]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[9] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 10: 2*(a[3]*a[7] + a[4]*a[6]) + a[5]^2. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[3], a->dp[7]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[4], a->dp[6]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[5]);
    r->dp[10] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 11: 2*(a[4]*a[7] + a[5]*a[6]). */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[4], a->dp[7]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[5], a->dp[6]);
    r->dp[11] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 12: 2*a[5]*a[7] + a[6]^2. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[5], a->dp[7]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[6]);
    r->dp[12] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 13: 2*a[6]*a[7]. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[6], a->dp[7]);
    r->dp[13] = l;
    l = h;
    h = o;
    /* Column 14: a[7]^2 — top column; cannot overflow into a third word. */
    SP_ASM_SQR_ADD_NO(l, h, a->dp[7]);
    r->dp[14] = l;
    r->dp[15] = h;
    /* a's digits are fully consumed: commit the staged low half. */
    XMEMCPY(r->dp, t, 8 * sizeof(sp_int_digit));
    r->used = 16;
    sp_clamp(r);  /* Trim leading zero digits from 'used'. */
    return MP_OKAY;
}
  13533. #endif /* SQR_MUL_ASM */
  13534. #endif /* SP_WORD_SIZE == 32 */
  13535. #if SP_WORD_SIZE == 32
  13536. #ifdef SQR_MUL_ASM
/* Square a and store in r. r = a * a
 *
 * Comba implementation: the 24 result digits (columns) are produced in
 * order.  Column k sums all products a->dp[i] * a->dp[j] with i + j == k
 * into the triple-word accumulator l:h:o (low, high, overflow).  Cross
 * products (i != j) occur twice, so they are added via the doubling macros
 * (SP_ASM_MUL_ADD2*) or summed once into tl:th:to and doubled in with
 * SP_ASM_ADD_DBL_3; square terms are added once via SP_ASM_SQR_ADD*.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY always; this variant performs no dynamic allocation.
 */
static int _sp_sqr_12(const sp_int* a, sp_int* r)
{
    sp_int_digit l = 0;   /* Accumulator word 0 (low) of current column.  */
    sp_int_digit h = 0;   /* Accumulator word 1 (high).                   */
    sp_int_digit o = 0;   /* Accumulator word 2 (overflow/carry).         */
    sp_int_digit tl = 0;  /* Cross-product sub-total, low word.           */
    sp_int_digit th = 0;  /* Cross-product sub-total, high word.          */
    sp_int_digit to;      /* Cross-product sub-total, overflow word.      */
    sp_int_digit t[12];   /* Low half of the result is staged here so the
                           * call works when r aliases a; copied out last. */
#if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
    /* NOTE(review): the Thumb macros appear to read 'to' before first
     * setting it, so it is zeroed only for that target. */
    to = 0;
#endif
    /* Column 0: a[0]^2.  SP_ASM_SQR writes the low word into its first
     * argument (h here) and the high word into the second (l). */
    SP_ASM_SQR(h, l, a->dp[0]);
    t[0] = h;
    h = 0;
    /* Column 1: 2*a[0]*a[1].  After each column the accumulator is shifted
     * down one word: l = h; h = o; o = 0. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
    t[1] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 2. */
    SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
    t[2] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 3. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
    t[3] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 4. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[4]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
    t[4] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 5: from here wide columns sum cross products once into
     * tl:th:to and double them in with SP_ASM_ADD_DBL_3. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[4]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[3]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[5] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 6. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[4]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[3]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[6] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 7. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[5]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[4]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[7] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 8. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[5]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[4]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[8] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 9. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[6]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[5]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[9] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 10. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[6]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[5]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[10] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 11 — widest column (6 cross products). */
    SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[7]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[6]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    t[11] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 12: low digits of a are no longer read from here, so results
     * are written straight into r->dp. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[1], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[7]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[6]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[12] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 13. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[2], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[8]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[7]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[13] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 14. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[3], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[8]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[7]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[14] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 15. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[4], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[9]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[8]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[15] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 16. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[5], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[9]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[8]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[16] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 17. */
    SP_ASM_MUL_SET(tl, th, to, a->dp[6], a->dp[11]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[10]);
    SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[9]);
    SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
    r->dp[17] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 18: narrow columns switch back to the direct doubling macro. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[7], a->dp[11]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[8], a->dp[10]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[9]);
    r->dp[18] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 19. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[8], a->dp[11]);
    SP_ASM_MUL_ADD2(l, h, o, a->dp[9], a->dp[10]);
    r->dp[19] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 20. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[9], a->dp[11]);
    SP_ASM_SQR_ADD(l, h, o, a->dp[10]);
    r->dp[20] = l;
    l = h;
    h = o;
    o = 0;
    /* Column 21. */
    SP_ASM_MUL_ADD2(l, h, o, a->dp[10], a->dp[11]);
    r->dp[21] = l;
    l = h;
    h = o;
    /* Column 22: a[11]^2 — top column; cannot overflow into a third word. */
    SP_ASM_SQR_ADD_NO(l, h, a->dp[11]);
    r->dp[22] = l;
    r->dp[23] = h;
    /* a's digits are fully consumed: commit the staged low half. */
    XMEMCPY(r->dp, t, 12 * sizeof(sp_int_digit));
    r->used = 24;
    sp_clamp(r);  /* Trim leading zero digits from 'used'. */
    return MP_OKAY;
}
  13742. #endif /* SQR_MUL_ASM */
  13743. #endif /* SP_WORD_SIZE == 32 */
  13744. #endif /* !WOLFSSL_HAVE_SP_ECC && HAVE_ECC */
  13745. #if defined(SQR_MUL_ASM) && (defined(WOLFSSL_SP_INT_LARGE_COMBA) || \
  13746. (!defined(WOLFSSL_SP_MATH) && defined(WOLFCRYPT_HAVE_SAKKE) && \
  13747. (SP_WORD_SIZE == 64)))
  13748. #if SP_INT_DIGITS >= 32
/* Square a and store in r. r = a * a
 *
 * Comba implementation: the 32 result digits (columns) are produced in
 * order.  Column k sums all products a->dp[i] * a->dp[j] with i + j == k
 * into the triple-word accumulator l:h:o (low, high, overflow).  Cross
 * products (i != j) occur twice, so they are added via the doubling macros
 * (SP_ASM_MUL_ADD2*) or summed once into tl:th:to and doubled in with
 * SP_ASM_ADD_DBL_3; square terms are added once via SP_ASM_SQR_ADD*.
 *
 * The low 16 digits are staged in t[] so that r may alias a; they are
 * copied into r->dp once all of a's digits have been consumed.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails (only possible in
 *          the WOLFSSL_SMALL_STACK build, where t[] is heap-allocated).
 */
static int _sp_sqr_16(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    sp_int_digit l = 0;   /* Accumulator word 0 (low) of current column.  */
    sp_int_digit h = 0;   /* Accumulator word 1 (high).                   */
    sp_int_digit o = 0;   /* Accumulator word 2 (overflow/carry).         */
    sp_int_digit tl = 0;  /* Cross-product sub-total, low word.           */
    sp_int_digit th = 0;  /* Cross-product sub-total, high word.          */
    sp_int_digit to;      /* Cross-product sub-total, overflow word.      */
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    sp_int_digit* t = NULL;   /* Heap-allocated staging area (small stack). */
#else
    sp_int_digit t[16];       /* Stack staging area for low result digits.  */
#endif
#if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
    /* NOTE(review): the Thumb macros appear to read 'to' before first
     * setting it, so it is zeroed only for that target. */
    to = 0;
#endif
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * 16, NULL,
        DYNAMIC_TYPE_BIGINT);
    if (t == NULL) {
        err = MP_MEM;
    }
#endif
    if (err == MP_OKAY) {
        /* Column 0: a[0]^2.  SP_ASM_SQR writes the low word into its first
         * argument (h here) and the high word into the second (l). */
        SP_ASM_SQR(h, l, a->dp[0]);
        t[0] = h;
        h = 0;
        /* Column 1: 2*a[0]*a[1].  After each column the accumulator is
         * shifted down one word: l = h; h = o; o = 0. */
        SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
        t[1] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 2. */
        SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
        t[2] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 3. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
        SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
        t[3] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 4. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[4]);
        SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
        t[4] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 5: from here wide columns sum cross products once into
         * tl:th:to and double them in with SP_ASM_ADD_DBL_3. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[5]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[4]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[3]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[5] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 6. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[6]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[5]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[4]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[3]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[6] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 7. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[7]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[6]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[5]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[4]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[7] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 8. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[7]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[6]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[5]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[4]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[8] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 9. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[7]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[6]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[5]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[9] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 10. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[7]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[6]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[5]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[10] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 11. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[7]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[6]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[11] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 12. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[7]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[6]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[12] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 13. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[8]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[7]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[13] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 14. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[8]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[7]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[14] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 15 — widest column (8 cross products). */
        SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[9]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[8]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        t[15] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 16: low digits of a are no longer read from here, so
         * results are written straight into r->dp. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[1], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[9]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[8]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[16] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 17. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[2], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[10]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[9]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[17] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 18. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[3], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[10]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[9]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[18] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 19. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[4], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[11]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[10]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[19] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 20. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[5], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[11]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[10]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[20] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 21. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[6], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[12]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[11]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[21] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 22. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[7], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[12]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[11]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[22] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 23. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[8], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[13]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[12]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[23] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 24. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[9], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[13]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[12]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[24] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 25. */
        SP_ASM_MUL_SET(tl, th, to, a->dp[10], a->dp[15]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[14]);
        SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[13]);
        SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
        r->dp[25] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 26: narrow columns switch back to the direct doubling
         * macro. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[11], a->dp[15]);
        SP_ASM_MUL_ADD2(l, h, o, a->dp[12], a->dp[14]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[13]);
        r->dp[26] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 27. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[12], a->dp[15]);
        SP_ASM_MUL_ADD2(l, h, o, a->dp[13], a->dp[14]);
        r->dp[27] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 28. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[13], a->dp[15]);
        SP_ASM_SQR_ADD(l, h, o, a->dp[14]);
        r->dp[28] = l;
        l = h;
        h = o;
        o = 0;
        /* Column 29. */
        SP_ASM_MUL_ADD2(l, h, o, a->dp[14], a->dp[15]);
        r->dp[29] = l;
        l = h;
        h = o;
        /* Column 30: a[15]^2 — top column; cannot overflow into a third
         * word. */
        SP_ASM_SQR_ADD_NO(l, h, a->dp[15]);
        r->dp[30] = l;
        r->dp[31] = h;
        /* a's digits are fully consumed: commit the staged low half. */
        XMEMCPY(r->dp, t, 16 * sizeof(sp_int_digit));
        r->used = 32;
        sp_clamp(r);  /* Trim leading zero digits from 'used'. */
    }
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
    if (t != NULL) {
        XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
    }
#endif
    return err;
}
  14071. #endif /* SP_INT_DIGITS >= 32 */
  14072. #endif /* SQR_MUL_ASM && (WOLFSSL_SP_INT_LARGE_COMBA || !WOLFSSL_SP_MATH &&
  14073. * WOLFCRYPT_HAVE_SAKKE && SP_WORD_SIZE == 64 */
  14074. #if defined(SQR_MUL_ASM) && defined(WOLFSSL_SP_INT_LARGE_COMBA)
  14075. #if SP_INT_DIGITS >= 48
  14076. /* Square a and store in r. r = a * a
  14077. *
  14078. * Comba implementation.
  14079. *
  14080. * @param [in] a SP integer to square.
  14081. * @param [out] r SP integer result.
  14082. *
  14083. * @return MP_OKAY on success.
  14084. * @return MP_MEM when dynamic memory allocation fails.
  14085. */
  14086. static int _sp_sqr_24(const sp_int* a, sp_int* r)
  14087. {
  14088. int err = MP_OKAY;
  14089. sp_int_digit l = 0;
  14090. sp_int_digit h = 0;
  14091. sp_int_digit o = 0;
  14092. sp_int_digit tl = 0;
  14093. sp_int_digit th = 0;
  14094. sp_int_digit to;
  14095. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  14096. sp_int_digit* t = NULL;
  14097. #else
  14098. sp_int_digit t[24];
  14099. #endif
  14100. #if defined(WOLFSSL_SP_ARM_THUMB) && SP_WORD_SIZE == 32
  14101. to = 0;
  14102. #endif
  14103. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  14104. t = (sp_int_digit*)XMALLOC(sizeof(sp_int_digit) * 24, NULL,
  14105. DYNAMIC_TYPE_BIGINT);
  14106. if (t == NULL) {
  14107. err = MP_MEM;
  14108. }
  14109. #endif
  14110. if (err == MP_OKAY) {
  14111. SP_ASM_SQR(h, l, a->dp[0]);
  14112. t[0] = h;
  14113. h = 0;
  14114. SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[1]);
  14115. t[1] = l;
  14116. l = h;
  14117. h = o;
  14118. o = 0;
  14119. SP_ASM_MUL_ADD2_NO(l, h, o, a->dp[0], a->dp[2]);
  14120. SP_ASM_SQR_ADD(l, h, o, a->dp[1]);
  14121. t[2] = l;
  14122. l = h;
  14123. h = o;
  14124. o = 0;
  14125. SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[3]);
  14126. SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[2]);
  14127. t[3] = l;
  14128. l = h;
  14129. h = o;
  14130. o = 0;
  14131. SP_ASM_MUL_ADD2(l, h, o, a->dp[0], a->dp[4]);
  14132. SP_ASM_MUL_ADD2(l, h, o, a->dp[1], a->dp[3]);
  14133. SP_ASM_SQR_ADD(l, h, o, a->dp[2]);
  14134. t[4] = l;
  14135. l = h;
  14136. h = o;
  14137. o = 0;
  14138. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[5]);
  14139. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[4]);
  14140. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[3]);
  14141. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14142. t[5] = l;
  14143. l = h;
  14144. h = o;
  14145. o = 0;
  14146. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[6]);
  14147. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[5]);
  14148. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[4]);
  14149. SP_ASM_SQR_ADD(l, h, o, a->dp[3]);
  14150. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14151. t[6] = l;
  14152. l = h;
  14153. h = o;
  14154. o = 0;
  14155. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[7]);
  14156. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[6]);
  14157. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[5]);
  14158. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[4]);
  14159. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14160. t[7] = l;
  14161. l = h;
  14162. h = o;
  14163. o = 0;
  14164. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[8]);
  14165. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[7]);
  14166. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[6]);
  14167. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[5]);
  14168. SP_ASM_SQR_ADD(l, h, o, a->dp[4]);
  14169. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14170. t[8] = l;
  14171. l = h;
  14172. h = o;
  14173. o = 0;
  14174. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[9]);
  14175. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[8]);
  14176. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[7]);
  14177. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[6]);
  14178. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[5]);
  14179. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14180. t[9] = l;
  14181. l = h;
  14182. h = o;
  14183. o = 0;
  14184. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[10]);
  14185. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[9]);
  14186. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[8]);
  14187. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[7]);
  14188. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[6]);
  14189. SP_ASM_SQR_ADD(l, h, o, a->dp[5]);
  14190. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14191. t[10] = l;
  14192. l = h;
  14193. h = o;
  14194. o = 0;
  14195. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[11]);
  14196. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[10]);
  14197. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[9]);
  14198. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[8]);
  14199. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[7]);
  14200. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[6]);
  14201. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14202. t[11] = l;
  14203. l = h;
  14204. h = o;
  14205. o = 0;
  14206. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[12]);
  14207. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[11]);
  14208. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[10]);
  14209. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[9]);
  14210. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[8]);
  14211. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[7]);
  14212. SP_ASM_SQR_ADD(l, h, o, a->dp[6]);
  14213. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14214. t[12] = l;
  14215. l = h;
  14216. h = o;
  14217. o = 0;
  14218. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[13]);
  14219. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[12]);
  14220. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[11]);
  14221. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[10]);
  14222. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[9]);
  14223. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[8]);
  14224. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[7]);
  14225. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14226. t[13] = l;
  14227. l = h;
  14228. h = o;
  14229. o = 0;
  14230. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[14]);
  14231. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[13]);
  14232. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[12]);
  14233. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[11]);
  14234. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[10]);
  14235. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[9]);
  14236. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[8]);
  14237. SP_ASM_SQR_ADD(l, h, o, a->dp[7]);
  14238. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14239. t[14] = l;
  14240. l = h;
  14241. h = o;
  14242. o = 0;
  14243. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[15]);
  14244. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[14]);
  14245. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[13]);
  14246. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[12]);
  14247. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[11]);
  14248. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[10]);
  14249. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[9]);
  14250. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[8]);
  14251. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14252. t[15] = l;
  14253. l = h;
  14254. h = o;
  14255. o = 0;
  14256. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[16]);
  14257. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[15]);
  14258. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[14]);
  14259. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[13]);
  14260. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[12]);
  14261. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[11]);
  14262. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[10]);
  14263. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[9]);
  14264. SP_ASM_SQR_ADD(l, h, o, a->dp[8]);
  14265. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14266. t[16] = l;
  14267. l = h;
  14268. h = o;
  14269. o = 0;
  14270. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[17]);
  14271. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[16]);
  14272. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[15]);
  14273. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[14]);
  14274. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[13]);
  14275. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[12]);
  14276. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[11]);
  14277. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[10]);
  14278. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[9]);
  14279. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14280. t[17] = l;
  14281. l = h;
  14282. h = o;
  14283. o = 0;
  14284. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[18]);
  14285. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[17]);
  14286. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[16]);
  14287. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[15]);
  14288. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[14]);
  14289. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[13]);
  14290. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[12]);
  14291. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[11]);
  14292. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[10]);
  14293. SP_ASM_SQR_ADD(l, h, o, a->dp[9]);
  14294. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14295. t[18] = l;
  14296. l = h;
  14297. h = o;
  14298. o = 0;
  14299. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[19]);
  14300. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[18]);
  14301. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[17]);
  14302. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[16]);
  14303. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[15]);
  14304. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[14]);
  14305. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[13]);
  14306. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[12]);
  14307. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[11]);
  14308. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[10]);
  14309. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14310. t[19] = l;
  14311. l = h;
  14312. h = o;
  14313. o = 0;
  14314. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[20]);
  14315. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[19]);
  14316. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[18]);
  14317. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[17]);
  14318. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[16]);
  14319. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[15]);
  14320. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[14]);
  14321. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[13]);
  14322. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[12]);
  14323. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[11]);
  14324. SP_ASM_SQR_ADD(l, h, o, a->dp[10]);
  14325. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14326. t[20] = l;
  14327. l = h;
  14328. h = o;
  14329. o = 0;
  14330. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[21]);
  14331. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[20]);
  14332. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[19]);
  14333. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[18]);
  14334. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[17]);
  14335. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[16]);
  14336. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[15]);
  14337. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[14]);
  14338. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[13]);
  14339. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[12]);
  14340. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[11]);
  14341. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14342. t[21] = l;
  14343. l = h;
  14344. h = o;
  14345. o = 0;
  14346. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[22]);
  14347. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[21]);
  14348. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[20]);
  14349. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[19]);
  14350. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[18]);
  14351. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[17]);
  14352. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[16]);
  14353. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[15]);
  14354. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[14]);
  14355. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[13]);
  14356. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[12]);
  14357. SP_ASM_SQR_ADD(l, h, o, a->dp[11]);
  14358. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14359. t[22] = l;
  14360. l = h;
  14361. h = o;
  14362. o = 0;
  14363. SP_ASM_MUL_SET(tl, th, to, a->dp[0], a->dp[23]);
  14364. SP_ASM_MUL_ADD(tl, th, to, a->dp[1], a->dp[22]);
  14365. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[21]);
  14366. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[20]);
  14367. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[19]);
  14368. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[18]);
  14369. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[17]);
  14370. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[16]);
  14371. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[15]);
  14372. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[14]);
  14373. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[13]);
  14374. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[12]);
  14375. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14376. t[23] = l;
  14377. l = h;
  14378. h = o;
  14379. o = 0;
  14380. SP_ASM_MUL_SET(tl, th, to, a->dp[1], a->dp[23]);
  14381. SP_ASM_MUL_ADD(tl, th, to, a->dp[2], a->dp[22]);
  14382. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[21]);
  14383. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[20]);
  14384. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[19]);
  14385. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[18]);
  14386. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[17]);
  14387. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[16]);
  14388. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[15]);
  14389. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[14]);
  14390. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[13]);
  14391. SP_ASM_SQR_ADD(l, h, o, a->dp[12]);
  14392. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14393. r->dp[24] = l;
  14394. l = h;
  14395. h = o;
  14396. o = 0;
  14397. SP_ASM_MUL_SET(tl, th, to, a->dp[2], a->dp[23]);
  14398. SP_ASM_MUL_ADD(tl, th, to, a->dp[3], a->dp[22]);
  14399. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[21]);
  14400. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[20]);
  14401. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[19]);
  14402. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[18]);
  14403. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[17]);
  14404. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[16]);
  14405. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[15]);
  14406. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[14]);
  14407. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[13]);
  14408. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14409. r->dp[25] = l;
  14410. l = h;
  14411. h = o;
  14412. o = 0;
  14413. SP_ASM_MUL_SET(tl, th, to, a->dp[3], a->dp[23]);
  14414. SP_ASM_MUL_ADD(tl, th, to, a->dp[4], a->dp[22]);
  14415. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[21]);
  14416. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[20]);
  14417. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[19]);
  14418. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[18]);
  14419. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[17]);
  14420. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[16]);
  14421. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[15]);
  14422. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[14]);
  14423. SP_ASM_SQR_ADD(l, h, o, a->dp[13]);
  14424. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14425. r->dp[26] = l;
  14426. l = h;
  14427. h = o;
  14428. o = 0;
  14429. SP_ASM_MUL_SET(tl, th, to, a->dp[4], a->dp[23]);
  14430. SP_ASM_MUL_ADD(tl, th, to, a->dp[5], a->dp[22]);
  14431. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[21]);
  14432. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[20]);
  14433. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[19]);
  14434. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[18]);
  14435. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[17]);
  14436. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[16]);
  14437. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[15]);
  14438. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[14]);
  14439. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14440. r->dp[27] = l;
  14441. l = h;
  14442. h = o;
  14443. o = 0;
  14444. SP_ASM_MUL_SET(tl, th, to, a->dp[5], a->dp[23]);
  14445. SP_ASM_MUL_ADD(tl, th, to, a->dp[6], a->dp[22]);
  14446. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[21]);
  14447. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[20]);
  14448. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[19]);
  14449. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[18]);
  14450. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[17]);
  14451. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[16]);
  14452. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[15]);
  14453. SP_ASM_SQR_ADD(l, h, o, a->dp[14]);
  14454. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14455. r->dp[28] = l;
  14456. l = h;
  14457. h = o;
  14458. o = 0;
  14459. SP_ASM_MUL_SET(tl, th, to, a->dp[6], a->dp[23]);
  14460. SP_ASM_MUL_ADD(tl, th, to, a->dp[7], a->dp[22]);
  14461. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[21]);
  14462. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[20]);
  14463. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[19]);
  14464. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[18]);
  14465. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[17]);
  14466. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[16]);
  14467. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[15]);
  14468. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14469. r->dp[29] = l;
  14470. l = h;
  14471. h = o;
  14472. o = 0;
  14473. SP_ASM_MUL_SET(tl, th, to, a->dp[7], a->dp[23]);
  14474. SP_ASM_MUL_ADD(tl, th, to, a->dp[8], a->dp[22]);
  14475. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[21]);
  14476. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[20]);
  14477. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[19]);
  14478. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[18]);
  14479. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[17]);
  14480. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[16]);
  14481. SP_ASM_SQR_ADD(l, h, o, a->dp[15]);
  14482. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14483. r->dp[30] = l;
  14484. l = h;
  14485. h = o;
  14486. o = 0;
  14487. SP_ASM_MUL_SET(tl, th, to, a->dp[8], a->dp[23]);
  14488. SP_ASM_MUL_ADD(tl, th, to, a->dp[9], a->dp[22]);
  14489. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[21]);
  14490. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[20]);
  14491. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[19]);
  14492. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[18]);
  14493. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[17]);
  14494. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[16]);
  14495. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14496. r->dp[31] = l;
  14497. l = h;
  14498. h = o;
  14499. o = 0;
  14500. SP_ASM_MUL_SET(tl, th, to, a->dp[9], a->dp[23]);
  14501. SP_ASM_MUL_ADD(tl, th, to, a->dp[10], a->dp[22]);
  14502. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[21]);
  14503. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[20]);
  14504. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[19]);
  14505. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[18]);
  14506. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[17]);
  14507. SP_ASM_SQR_ADD(l, h, o, a->dp[16]);
  14508. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14509. r->dp[32] = l;
  14510. l = h;
  14511. h = o;
  14512. o = 0;
  14513. SP_ASM_MUL_SET(tl, th, to, a->dp[10], a->dp[23]);
  14514. SP_ASM_MUL_ADD(tl, th, to, a->dp[11], a->dp[22]);
  14515. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[21]);
  14516. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[20]);
  14517. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[19]);
  14518. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[18]);
  14519. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[17]);
  14520. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14521. r->dp[33] = l;
  14522. l = h;
  14523. h = o;
  14524. o = 0;
  14525. SP_ASM_MUL_SET(tl, th, to, a->dp[11], a->dp[23]);
  14526. SP_ASM_MUL_ADD(tl, th, to, a->dp[12], a->dp[22]);
  14527. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[21]);
  14528. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[20]);
  14529. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[19]);
  14530. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[18]);
  14531. SP_ASM_SQR_ADD(l, h, o, a->dp[17]);
  14532. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14533. r->dp[34] = l;
  14534. l = h;
  14535. h = o;
  14536. o = 0;
  14537. SP_ASM_MUL_SET(tl, th, to, a->dp[12], a->dp[23]);
  14538. SP_ASM_MUL_ADD(tl, th, to, a->dp[13], a->dp[22]);
  14539. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[21]);
  14540. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[20]);
  14541. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[19]);
  14542. SP_ASM_MUL_ADD(tl, th, to, a->dp[17], a->dp[18]);
  14543. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14544. r->dp[35] = l;
  14545. l = h;
  14546. h = o;
  14547. o = 0;
  14548. SP_ASM_MUL_SET(tl, th, to, a->dp[13], a->dp[23]);
  14549. SP_ASM_MUL_ADD(tl, th, to, a->dp[14], a->dp[22]);
  14550. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[21]);
  14551. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[20]);
  14552. SP_ASM_MUL_ADD(tl, th, to, a->dp[17], a->dp[19]);
  14553. SP_ASM_SQR_ADD(l, h, o, a->dp[18]);
  14554. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14555. r->dp[36] = l;
  14556. l = h;
  14557. h = o;
  14558. o = 0;
  14559. SP_ASM_MUL_SET(tl, th, to, a->dp[14], a->dp[23]);
  14560. SP_ASM_MUL_ADD(tl, th, to, a->dp[15], a->dp[22]);
  14561. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[21]);
  14562. SP_ASM_MUL_ADD(tl, th, to, a->dp[17], a->dp[20]);
  14563. SP_ASM_MUL_ADD(tl, th, to, a->dp[18], a->dp[19]);
  14564. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14565. r->dp[37] = l;
  14566. l = h;
  14567. h = o;
  14568. o = 0;
  14569. SP_ASM_MUL_SET(tl, th, to, a->dp[15], a->dp[23]);
  14570. SP_ASM_MUL_ADD(tl, th, to, a->dp[16], a->dp[22]);
  14571. SP_ASM_MUL_ADD(tl, th, to, a->dp[17], a->dp[21]);
  14572. SP_ASM_MUL_ADD(tl, th, to, a->dp[18], a->dp[20]);
  14573. SP_ASM_SQR_ADD(l, h, o, a->dp[19]);
  14574. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14575. r->dp[38] = l;
  14576. l = h;
  14577. h = o;
  14578. o = 0;
  14579. SP_ASM_MUL_SET(tl, th, to, a->dp[16], a->dp[23]);
  14580. SP_ASM_MUL_ADD(tl, th, to, a->dp[17], a->dp[22]);
  14581. SP_ASM_MUL_ADD(tl, th, to, a->dp[18], a->dp[21]);
  14582. SP_ASM_MUL_ADD(tl, th, to, a->dp[19], a->dp[20]);
  14583. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14584. r->dp[39] = l;
  14585. l = h;
  14586. h = o;
  14587. o = 0;
  14588. SP_ASM_MUL_SET(tl, th, to, a->dp[17], a->dp[23]);
  14589. SP_ASM_MUL_ADD(tl, th, to, a->dp[18], a->dp[22]);
  14590. SP_ASM_MUL_ADD(tl, th, to, a->dp[19], a->dp[21]);
  14591. SP_ASM_SQR_ADD(l, h, o, a->dp[20]);
  14592. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14593. r->dp[40] = l;
  14594. l = h;
  14595. h = o;
  14596. o = 0;
  14597. SP_ASM_MUL_SET(tl, th, to, a->dp[18], a->dp[23]);
  14598. SP_ASM_MUL_ADD(tl, th, to, a->dp[19], a->dp[22]);
  14599. SP_ASM_MUL_ADD(tl, th, to, a->dp[20], a->dp[21]);
  14600. SP_ASM_ADD_DBL_3(l, h, o, tl, th, to);
  14601. r->dp[41] = l;
  14602. l = h;
  14603. h = o;
  14604. o = 0;
  14605. SP_ASM_MUL_ADD2(l, h, o, a->dp[19], a->dp[23]);
  14606. SP_ASM_MUL_ADD2(l, h, o, a->dp[20], a->dp[22]);
  14607. SP_ASM_SQR_ADD(l, h, o, a->dp[21]);
  14608. r->dp[42] = l;
  14609. l = h;
  14610. h = o;
  14611. o = 0;
  14612. SP_ASM_MUL_ADD2(l, h, o, a->dp[20], a->dp[23]);
  14613. SP_ASM_MUL_ADD2(l, h, o, a->dp[21], a->dp[22]);
  14614. r->dp[43] = l;
  14615. l = h;
  14616. h = o;
  14617. o = 0;
  14618. SP_ASM_MUL_ADD2(l, h, o, a->dp[21], a->dp[23]);
  14619. SP_ASM_SQR_ADD(l, h, o, a->dp[22]);
  14620. r->dp[44] = l;
  14621. l = h;
  14622. h = o;
  14623. o = 0;
  14624. SP_ASM_MUL_ADD2(l, h, o, a->dp[22], a->dp[23]);
  14625. r->dp[45] = l;
  14626. l = h;
  14627. h = o;
  14628. SP_ASM_SQR_ADD_NO(l, h, a->dp[23]);
  14629. r->dp[46] = l;
  14630. r->dp[47] = h;
  14631. XMEMCPY(r->dp, t, 24 * sizeof(sp_int_digit));
  14632. r->used = 48;
  14633. sp_clamp(r);
  14634. }
  14635. #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SP_NO_MALLOC)
  14636. if (t != NULL) {
  14637. XFREE(t, NULL, DYNAMIC_TYPE_BIGINT);
  14638. }
  14639. #endif
  14640. return err;
  14641. }
  14642. #endif /* SP_INT_DIGITS >= 48 */
  14643. #if SP_INT_DIGITS >= 64
/* Square a and store in r. r = a * a
 *
 * Karatsuba implementation: split a = a1 * R + a0 where R = 2^(16 digits).
 * Then a^2 = z2 * R^2 + ((a0 + a1)^2 - z2 - z0) * R + z0,
 * with z0 = a0^2 and z2 = a1^2 - only three half-size squarings needed.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_sqr_32(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;
    sp_int_digit h;
    sp_int* z0;
    sp_int* z1;
    sp_int* z2;
    sp_int_digit ca;     /* Carry out of the a0 + a1 addition (0 or 1). */
    DECL_SP_INT(a1, 16);
    DECL_SP_INT_ARRAY(z, 33, 2);
    ALLOC_SP_INT(a1, 16, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 33, 2, err, NULL);
    if (err == MP_OKAY) {
        z1 = z[0];
        z2 = z[1];
        /* z0 aliases r: low half of the result is built directly in r. */
        z0 = r;
        /* a1 = top 16 digits of a. */
        XMEMCPY(a1->dp, &a->dp[16], sizeof(sp_int_digit) * 16);
        a1->used = 16;
        /* z2 = a1 ^ 2 */
        err = _sp_sqr_16(a1, z2);
    }
    if (err == MP_OKAY) {
        /* a1 = a0 + a1 (mod R); final carry saved in ca. */
        l = 0;
        h = 0;
        for (i = 0; i < 16; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* z0 = a0 ^ 2 */
        err = _sp_sqr_16(a, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) ^ 2 */
        err = _sp_sqr_16(a1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 32) + ((z1 - z0 - z2) << 16) + z0 */
        /* r = z0 : already in place (z0 aliases r). */
        /* r += (z1 - z0 - z2) << 16 */
        /* Fold in the carry of a0 + a1: since ca is 0 or 1,
         * (a0 + a1)^2 = z1 + (2 * ca * (a0 + a1) << 16) + (ca << 32). */
        z1->dp[32] = ca;
        l = 0;
        if (ca) {
            /* Add 2 * (a0 + a1 mod R) into the top half of z1. */
            l = z1->dp[0 + 16];
            h = 0;
            SP_ASM_ADDC(l, h, a1->dp[0]);
            SP_ASM_ADDC(l, h, a1->dp[0]);
            z1->dp[0 + 16] = l;
            l = h;
            h = 0;
            for (i = 1; i < 16; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 16]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 16] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[32] += l;
        /* z1 = z1 - z0 - z2 */
        l = z1->dp[0];
        h = 0;
        SP_ASM_SUBB(l, h, z0->dp[0]);
        SP_ASM_SUBB(l, h, z2->dp[0]);
        z1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 32; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 16 */
        l = 0;
        h = 0;
        for (i = 0; i < 16; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 16]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 16] = l;
            l = h;
            h = 0;
        }
        /* Digits of z1 above z0's top: no existing r digit to add in. */
        for (; i < 33; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 16] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 32 */
        l = 0;
        h = 0;
        for (i = 0; i < 17; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 32]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        for (; i < 32; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        r->used = 64;
        sp_clamp(r);
    }
    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT(a1, NULL);
    return err;
}
  14775. #endif /* SP_INT_DIGITS >= 64 */
  14776. #if SP_INT_DIGITS >= 96
/* Square a and store in r. r = a * a
 *
 * Karatsuba implementation: split a = a1 * R + a0 where R = 2^(24 digits).
 * Then a^2 = z2 * R^2 + ((a0 + a1)^2 - z2 - z0) * R + z0,
 * with z0 = a0^2 and z2 = a1^2 - only three half-size squarings needed.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_sqr_48(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;
    sp_int_digit h;
    sp_int* z0;
    sp_int* z1;
    sp_int* z2;
    sp_int_digit ca;     /* Carry out of the a0 + a1 addition (0 or 1). */
    DECL_SP_INT(a1, 24);
    DECL_SP_INT_ARRAY(z, 49, 2);
    ALLOC_SP_INT(a1, 24, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 49, 2, err, NULL);
    if (err == MP_OKAY) {
        z1 = z[0];
        z2 = z[1];
        /* z0 aliases r: low half of the result is built directly in r. */
        z0 = r;
        /* a1 = top 24 digits of a. */
        XMEMCPY(a1->dp, &a->dp[24], sizeof(sp_int_digit) * 24);
        a1->used = 24;
        /* z2 = a1 ^ 2 */
        err = _sp_sqr_24(a1, z2);
    }
    if (err == MP_OKAY) {
        /* a1 = a0 + a1 (mod R); final carry saved in ca. */
        l = 0;
        h = 0;
        for (i = 0; i < 24; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* z0 = a0 ^ 2 */
        err = _sp_sqr_24(a, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) ^ 2 */
        err = _sp_sqr_24(a1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 48) + ((z1 - z0 - z2) << 24) + z0 */
        /* r = z0 : already in place (z0 aliases r). */
        /* r += (z1 - z0 - z2) << 24 */
        /* Fold in the carry of a0 + a1: since ca is 0 or 1,
         * (a0 + a1)^2 = z1 + (2 * ca * (a0 + a1) << 24) + (ca << 48). */
        z1->dp[48] = ca;
        l = 0;
        if (ca) {
            /* Add 2 * (a0 + a1 mod R) into the top half of z1. */
            l = z1->dp[0 + 24];
            h = 0;
            SP_ASM_ADDC(l, h, a1->dp[0]);
            SP_ASM_ADDC(l, h, a1->dp[0]);
            z1->dp[0 + 24] = l;
            l = h;
            h = 0;
            for (i = 1; i < 24; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 24]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 24] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[48] += l;
        /* z1 = z1 - z0 - z2 */
        l = z1->dp[0];
        h = 0;
        SP_ASM_SUBB(l, h, z0->dp[0]);
        SP_ASM_SUBB(l, h, z2->dp[0]);
        z1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 48; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 24 */
        l = 0;
        h = 0;
        for (i = 0; i < 24; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 24]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 24] = l;
            l = h;
            h = 0;
        }
        /* Digits of z1 above z0's top: no existing r digit to add in. */
        for (; i < 49; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 24] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 48 */
        l = 0;
        h = 0;
        for (i = 0; i < 25; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 48]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        for (; i < 48; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        r->used = 96;
        sp_clamp(r);
    }
    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT(a1, NULL);
    return err;
}
  14908. #endif /* SP_INT_DIGITS >= 96 */
  14909. #if SP_INT_DIGITS >= 128
/* Square a and store in r. r = a * a
 *
 * Karatsuba implementation: split a = a1 * R + a0 where R = 2^(32 digits).
 * Then a^2 = z2 * R^2 + ((a0 + a1)^2 - z2 - z0) * R + z0,
 * with z0 = a0^2 and z2 = a1^2 - only three half-size squarings needed.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_sqr_64(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;
    sp_int_digit h;
    sp_int* z0;
    sp_int* z1;
    sp_int* z2;
    sp_int_digit ca;     /* Carry out of the a0 + a1 addition (0 or 1). */
    DECL_SP_INT(a1, 32);
    DECL_SP_INT_ARRAY(z, 65, 2);
    ALLOC_SP_INT(a1, 32, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 65, 2, err, NULL);
    if (err == MP_OKAY) {
        z1 = z[0];
        z2 = z[1];
        /* z0 aliases r: low half of the result is built directly in r. */
        z0 = r;
        /* a1 = top 32 digits of a. */
        XMEMCPY(a1->dp, &a->dp[32], sizeof(sp_int_digit) * 32);
        a1->used = 32;
        /* z2 = a1 ^ 2 */
        err = _sp_sqr_32(a1, z2);
    }
    if (err == MP_OKAY) {
        /* a1 = a0 + a1 (mod R); final carry saved in ca. */
        l = 0;
        h = 0;
        for (i = 0; i < 32; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* z0 = a0 ^ 2 */
        err = _sp_sqr_32(a, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) ^ 2 */
        err = _sp_sqr_32(a1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 64) + ((z1 - z0 - z2) << 32) + z0 */
        /* r = z0 : already in place (z0 aliases r). */
        /* r += (z1 - z0 - z2) << 32 */
        /* Fold in the carry of a0 + a1: since ca is 0 or 1,
         * (a0 + a1)^2 = z1 + (2 * ca * (a0 + a1) << 32) + (ca << 64). */
        z1->dp[64] = ca;
        l = 0;
        if (ca) {
            /* Add 2 * (a0 + a1 mod R) into the top half of z1. */
            l = z1->dp[0 + 32];
            h = 0;
            SP_ASM_ADDC(l, h, a1->dp[0]);
            SP_ASM_ADDC(l, h, a1->dp[0]);
            z1->dp[0 + 32] = l;
            l = h;
            h = 0;
            for (i = 1; i < 32; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 32]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 32] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[64] += l;
        /* z1 = z1 - z0 - z2 */
        l = z1->dp[0];
        h = 0;
        SP_ASM_SUBB(l, h, z0->dp[0]);
        SP_ASM_SUBB(l, h, z2->dp[0]);
        z1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 64; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 32 */
        l = 0;
        h = 0;
        for (i = 0; i < 32; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 32]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        /* Digits of z1 above z0's top: no existing r digit to add in. */
        for (; i < 65; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 32] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 64 */
        l = 0;
        h = 0;
        for (i = 0; i < 33; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 64]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 64] = l;
            l = h;
            h = 0;
        }
        for (; i < 64; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 64] = l;
            l = h;
            h = 0;
        }
        r->used = 128;
        sp_clamp(r);
    }
    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT(a1, NULL);
    return err;
}
  15041. #endif /* SP_INT_DIGITS >= 128 */
  15042. #if SP_INT_DIGITS >= 192
/* Square a and store in r. r = a * a
 *
 * Karatsuba implementation: split a = a1 * R + a0 where R = 2^(48 digits).
 * Then a^2 = z2 * R^2 + ((a0 + a1)^2 - z2 - z0) * R + z0,
 * with z0 = a0^2 and z2 = a1^2 - only three half-size squarings needed.
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static int _sp_sqr_96(const sp_int* a, sp_int* r)
{
    int err = MP_OKAY;
    int i;
    sp_int_digit l;
    sp_int_digit h;
    sp_int* z0;
    sp_int* z1;
    sp_int* z2;
    sp_int_digit ca;     /* Carry out of the a0 + a1 addition (0 or 1). */
    DECL_SP_INT(a1, 48);
    DECL_SP_INT_ARRAY(z, 97, 2);
    ALLOC_SP_INT(a1, 48, err, NULL);
    ALLOC_SP_INT_ARRAY(z, 97, 2, err, NULL);
    if (err == MP_OKAY) {
        z1 = z[0];
        z2 = z[1];
        /* z0 aliases r: low half of the result is built directly in r. */
        z0 = r;
        /* a1 = top 48 digits of a. */
        XMEMCPY(a1->dp, &a->dp[48], sizeof(sp_int_digit) * 48);
        a1->used = 48;
        /* z2 = a1 ^ 2 */
        err = _sp_sqr_48(a1, z2);
    }
    if (err == MP_OKAY) {
        /* a1 = a0 + a1 (mod R); final carry saved in ca. */
        l = 0;
        h = 0;
        for (i = 0; i < 48; i++) {
            SP_ASM_ADDC(l, h, a1->dp[i]);
            SP_ASM_ADDC(l, h, a->dp[i]);
            a1->dp[i] = l;
            l = h;
            h = 0;
        }
        ca = l;
        /* z0 = a0 ^ 2 */
        err = _sp_sqr_48(a, z0);
    }
    if (err == MP_OKAY) {
        /* z1 = (a0 + a1) ^ 2 */
        err = _sp_sqr_48(a1, z1);
    }
    if (err == MP_OKAY) {
        /* r = (z2 << 96) + ((z1 - z0 - z2) << 48) + z0 */
        /* r = z0 : already in place (z0 aliases r). */
        /* r += (z1 - z0 - z2) << 48 */
        /* Fold in the carry of a0 + a1: since ca is 0 or 1,
         * (a0 + a1)^2 = z1 + (2 * ca * (a0 + a1) << 48) + (ca << 96). */
        z1->dp[96] = ca;
        l = 0;
        if (ca) {
            /* Add 2 * (a0 + a1 mod R) into the top half of z1. */
            l = z1->dp[0 + 48];
            h = 0;
            SP_ASM_ADDC(l, h, a1->dp[0]);
            SP_ASM_ADDC(l, h, a1->dp[0]);
            z1->dp[0 + 48] = l;
            l = h;
            h = 0;
            for (i = 1; i < 48; i++) {
                SP_ASM_ADDC(l, h, z1->dp[i + 48]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                SP_ASM_ADDC(l, h, a1->dp[i]);
                z1->dp[i + 48] = l;
                l = h;
                h = 0;
            }
        }
        z1->dp[96] += l;
        /* z1 = z1 - z0 - z2 */
        l = z1->dp[0];
        h = 0;
        SP_ASM_SUBB(l, h, z0->dp[0]);
        SP_ASM_SUBB(l, h, z2->dp[0]);
        z1->dp[0] = l;
        l = h;
        h = 0;
        for (i = 1; i < 96; i++) {
            l += z1->dp[i];
            SP_ASM_SUBB(l, h, z0->dp[i]);
            SP_ASM_SUBB(l, h, z2->dp[i]);
            z1->dp[i] = l;
            l = h;
            h = 0;
        }
        z1->dp[i] += l;
        /* r += z1 << 48 */
        l = 0;
        h = 0;
        for (i = 0; i < 48; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 48]);
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        /* Digits of z1 above z0's top: no existing r digit to add in. */
        for (; i < 97; i++) {
            SP_ASM_ADDC(l, h, z1->dp[i]);
            r->dp[i + 48] = l;
            l = h;
            h = 0;
        }
        /* r += z2 << 96 */
        l = 0;
        h = 0;
        for (i = 0; i < 49; i++) {
            SP_ASM_ADDC(l, h, r->dp[i + 96]);
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 96] = l;
            l = h;
            h = 0;
        }
        for (; i < 96; i++) {
            SP_ASM_ADDC(l, h, z2->dp[i]);
            r->dp[i + 96] = l;
            l = h;
            h = 0;
        }
        r->used = 192;
        sp_clamp(r);
    }
    FREE_SP_INT_ARRAY(z, NULL);
    FREE_SP_INT(a1, NULL);
    return err;
}
  15174. #endif /* SP_INT_DIGITS >= 192 */
  15175. #endif /* SQR_MUL_ASM && WOLFSSL_SP_INT_LARGE_COMBA */
  15176. #endif /* !WOLFSSL_SP_SMALL */
/* Square a and store in r. r = a * a
 *
 * Dispatches to a fixed-size comba/Karatsuba implementation when a->used
 * exactly matches one of the compiled-in sizes, otherwise falls back to the
 * generic _sp_sqr().
 *
 * @param  [in]   a  SP integer to square.
 * @param  [out]  r  SP integer result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a or r is NULL, or the result will be too big for fixed
 *          data length.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_sqr(const sp_int* a, sp_int* r)
{
#if defined(WOLFSSL_SP_MATH) && defined(WOLFSSL_SP_SMALL)
    /* Small build keeps no dedicated squaring code - reuse multiplication. */
    return sp_mul(a, a, r);
#else
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Need extra digit during calculation. */
    if ((err == MP_OKAY) && (a->used * 2 > r->size)) {
        err = MP_VAL;
    }

#if 0
    if (err == MP_OKAY) {
        sp_print(a, "a");
    }
#endif

    if (err == MP_OKAY) {
        if (a->used == 0) {
            /* 0 squared is 0. */
            _sp_zero(r);
        }
        else
        /* Fixed-size implementations below are selected on an exact digit
         * count; each branch ends in a dangling 'else' feeding the next. */
#ifndef WOLFSSL_SP_SMALL
#if !defined(WOLFSSL_HAVE_SP_ECC) && defined(HAVE_ECC)
#if SP_WORD_SIZE == 64
        if (a->used == 4) {
            err = _sp_sqr_4(a, r);
        }
        else
#endif /* SP_WORD_SIZE == 64 */
#if SP_WORD_SIZE == 64
#ifdef SQR_MUL_ASM
        if (a->used == 6) {
            err = _sp_sqr_6(a, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 64 */
#if SP_WORD_SIZE == 32
#ifdef SQR_MUL_ASM
        if (a->used == 8) {
            err = _sp_sqr_8(a, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 32 */
#if SP_WORD_SIZE == 32
#ifdef SQR_MUL_ASM
        if (a->used == 12) {
            err = _sp_sqr_12(a, r);
        }
        else
#endif /* SQR_MUL_ASM */
#endif /* SP_WORD_SIZE == 32 */
#endif /* !WOLFSSL_HAVE_SP_ECC && HAVE_ECC */
#if defined(SQR_MUL_ASM) && (defined(WOLFSSL_SP_INT_LARGE_COMBA) || \
    (!defined(WOLFSSL_SP_MATH) && defined(WOLFCRYPT_HAVE_SAKKE) && \
    (SP_WORD_SIZE == 64)))
#if SP_INT_DIGITS >= 32
        if (a->used == 16) {
            err = _sp_sqr_16(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 32 */
#endif /* SQR_MUL_ASM && (WOLFSSL_SP_INT_LARGE_COMBA || !WOLFSSL_SP_MATH &&
        * WOLFCRYPT_HAVE_SAKKE && SP_WORD_SIZE == 64 */
#if defined(SQR_MUL_ASM) && defined(WOLFSSL_SP_INT_LARGE_COMBA)
#if SP_INT_DIGITS >= 48
        if (a->used == 24) {
            err = _sp_sqr_24(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 48 */
#if SP_INT_DIGITS >= 64
        if (a->used == 32) {
            err = _sp_sqr_32(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 64 */
#if SP_INT_DIGITS >= 96
        if (a->used == 48) {
            err = _sp_sqr_48(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 96 */
#if SP_INT_DIGITS >= 128
        if (a->used == 64) {
            err = _sp_sqr_64(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 128 */
#if SP_INT_DIGITS >= 192
        if (a->used == 96) {
            err = _sp_sqr_96(a, r);
        }
        else
#endif /* SP_INT_DIGITS >= 192 */
#endif /* SQR_MUL_ASM && WOLFSSL_SP_INT_LARGE_COMBA */
#endif /* !WOLFSSL_SP_SMALL */
        {
            /* Generic any-size squaring. */
            err = _sp_sqr(a, r);
        }
    }
#ifdef WOLFSSL_SP_INT_NEGATIVE
    if (err == MP_OKAY) {
        /* A square is never negative. */
        r->sign = MP_ZPOS;
    }
#endif
#if 0
    if (err == MP_OKAY) {
        sp_print(r, "rsqr");
    }
#endif
    return err;
#endif /* WOLFSSL_SP_MATH && WOLFSSL_SP_SMALL */
}
  15304. /* END SP_SQR implementations */
  15305. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH || HAVE_ECC ||
  15306. * (!NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) */
  15307. #if (!defined(WOLFSSL_RSA_VERIFY_ONLY) && \
  15308. !defined(WOLFSSL_RSA_PUBLIC_ONLY)) || !defined(NO_DH)
  15309. /* Square a mod m and store in r: r = (a * a) mod m
  15310. *
  15311. * @param [in] a SP integer to square.
  15312. * @param [in] m SP integer that is the modulus.
  15313. * @param [out] r SP integer result.
  15314. *
  15315. * @return MP_OKAY on success.
  15316. * @return MP_VAL when a, m or r is NULL; or m is 0; or a squared is too big
  15317. * for fixed data length.
  15318. * @return MP_MEM when dynamic memory allocation fails.
  15319. */
int sp_sqrmod(const sp_int* a, const sp_int* m, sp_int* r)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (m == NULL) || (r == NULL)) {
        err = MP_VAL;
    }
    /* Ensure r has space for the full double-width square when it is used
     * as the intermediate result. (When r aliases m a temporary is used
     * instead, so r only needs to hold the reduced result.)
     */
    if ((err == MP_OKAY) && (r != m) && (a->used * 2 > r->size)) {
        err = MP_VAL;
    }

    /* Use r as intermediate result if not same as pointer m which is needed
     * after first intermediate result.
     */
    if ((err == MP_OKAY) && (r != m)) {
        /* Square and reduce. */
        err = sp_sqr(a, r);
        if (err == MP_OKAY) {
            err = sp_mod(r, m, r);
        }
    }
    else if (err == MP_OKAY) {
        /* r aliases m: create temporary for the double-width square so m
         * is still intact for the reduction. */
        DECL_SP_INT(t, a->used * 2);

        ALLOC_SP_INT(t, a->used * 2, err, NULL);
        if (err == MP_OKAY) {
            err = sp_init_size(t, a->used * 2);
        }
        /* Square and reduce. */
        if (err == MP_OKAY) {
            err = sp_sqr(a, t);
        }
        if (err == MP_OKAY) {
            err = sp_mod(t, m, r);
        }
        /* Dispose of an allocated SP int. */
        FREE_SP_INT(t, NULL);
    }

    return err;
}
  15360. #endif /* !WOLFSSL_RSA_VERIFY_ONLY */
  15361. /**********************
  15362. * Montgomery functions
  15363. **********************/
  15364. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_HAVE_SP_DH) || \
  15365. defined(WOLFCRYPT_HAVE_ECCSI) || defined(WOLFCRYPT_HAVE_SAKKE) || \
  15366. defined(OPENSSL_ALL)
  15367. /* Reduce a number in Montgomery form.
  15368. *
  15369. * Assumes a and m are not NULL and m is not 0.
  15370. *
  15371. * DigitMask(a,i) := mask out the 'i'th digit in place.
  15372. *
  15373. * Algorithm:
  15374. * 1. mask = (1 << (NumBits(m) % WORD_SIZE)) - 1
  15375. * 2. For i = 0..NumDigits(m)-1
  15376. * 2.1. mu = (mp * DigitMask(a, i)) & WORD_MASK
  15377. * 2.2. If i == NumDigits(m)-1 and mask != 0 then mu & = mask
  15378. * 2.3. a += mu * DigitMask(m, 0)
  15379. * 2.4. For j = 1 up to NumDigits(m)-2
  15380. * 2.4.1 a += mu * DigitMask(m, j)
  15381. * 2.5 a += mu * DigitMask(m, NumDigits(m)-1))
  15382. * 3. a >>= NumBits(m)
  15383. * 4. a = a % m
  15384. *
  15385. * @param [in,out] a SP integer to Montgomery reduce.
  15386. * @param [in] m SP integer that is the modulus.
  15387. * @param [in] mp SP integer digit that is the bottom digit of inv(-m).
  15388. *
  15389. * @return MP_OKAY on success.
  15390. */
static int _sp_mont_red(sp_int* a, const sp_int* m, sp_int_digit mp)
{
#if !defined(SQR_MUL_ASM)
    int i;
    int bits;
    sp_int_word w;
    sp_int_digit mu;

#if 0
    sp_print(a, "a");
    sp_print(m, "m");
#endif

    /* Count bits in modulus. */
    bits = sp_count_bits(m);

    /* Adding numbers into m->used * 2 digits - zero out unused digits. */
    for (i = a->used; i < m->used * 2; i++) {
        a->dp[i] = 0;
    }

    /* Special case when modulus is 1 digit. */
    if (m->used == 1) {
        /* mu = (mp * DigitMask(a, i)) & WORD_MASK */
        mu = mp * a->dp[0];
        /* a += mu * m */
        w = a->dp[0];
        w += (sp_int_word)mu * m->dp[0];
        a->dp[0] = (sp_int_digit)w;
        /* Propagate the carry through the next two digits. */
        w >>= SP_WORD_SIZE;
        w += a->dp[1];
        a->dp[1] = (sp_int_digit)w;
        w >>= SP_WORD_SIZE;
        a->dp[2] = (sp_int_digit)w;
        a->used = 3;
        /* Shift for step 3 is one full word for a single digit modulus. */
        bits = SP_WORD_SIZE;
    }
    else {
        /* 1. mask = (1 << (NumBits(m) % WORD_SIZE)) - 1
         * Mask when last digit of modulus doesn't have highest bit set.
         */
        sp_int_digit mask = (sp_int_digit)
            (((sp_int_digit)1 << (bits & (SP_WORD_SIZE - 1))) - 1);
        /* Overflow. */
        sp_int_word o = 0;

        /* 2. For i = 0..NumDigits(m)-1 */
        for (i = 0; i < m->used; i++) {
            int j;

            /* 2.1. mu = (mp * DigitMask(a, i)) & WORD_MASK */
            mu = mp * a->dp[i];
            /* 2.2. If i == NumDigits(m)-1 and mask != 0 then mu & = mask */
            if ((i == m->used - 1) && (mask != 0)) {
                mu &= mask;
            }

            /* 2.3. a += mu * DigitMask(m, 0) */
            w = a->dp[i];
            w += (sp_int_word)mu * m->dp[0];
            a->dp[i] = (sp_int_digit)w;
            w >>= SP_WORD_SIZE;
            /* 2.4. For j = 1 up to NumDigits(m)-2 */
            for (j = 1; j < m->used - 1; j++) {
                /* 2.4.1 a += mu * DigitMask(m, j) */
                w += a->dp[i + j];
                w += (sp_int_word)mu * m->dp[j];
                a->dp[i + j] = (sp_int_digit)w;
                w >>= SP_WORD_SIZE;
            }
            /* Handle overflow. */
            w += o;
            w += a->dp[i + j];
            o = (sp_int_digit)(w >> SP_WORD_SIZE);
            /* 2.5 a += mu * DigitMask(m, NumDigits(m)-1)) */
            w = ((sp_int_word)mu * m->dp[j]) + (sp_int_digit)w;
            a->dp[i + j] = (sp_int_digit)w;
            w >>= SP_WORD_SIZE;
            o += w;
        }
        /* Handle overflow. */
        o += a->dp[m->used * 2 - 1];
        a->dp[m->used * 2 - 1] = (sp_int_digit)o;
        o >>= SP_WORD_SIZE;
        a->dp[m->used * 2] = (sp_int_digit)o;
        a->used = m->used * 2 + 1;
    }

    /* Remove leading zeros. */
    sp_clamp(a);
    /* 3. a >>= NumBits(m) */
    (void)sp_rshb(a, bits, a);
    /* 4. a = a mod m - one conditional subtraction is enough here. */
    if (_sp_cmp_abs(a, m) != MP_LT) {
        _sp_sub_off(a, m, a, 0);
    }

#if 0
    sp_print(a, "rr");
#endif
    return MP_OKAY;
#else /* !SQR_MUL_ASM */
    int i;
    int j;
    int bits;
    sp_int_digit mu;
    sp_int_digit o;
    sp_int_digit mask;

#if 0
    sp_print(a, "a");
    sp_print(m, "m");
#endif

    bits = sp_count_bits(m);
    mask = ((sp_int_digit)1 << (bits & (SP_WORD_SIZE - 1))) - 1;

    /* Adding numbers into m->used * 2 digits - zero out unused digits. */
    for (i = a->used; i < m->used * 2; i++) {
        a->dp[i] = 0;
    }

    /* Special case when modulus is 1 digit (or zero, which the caller
     * contract excludes). */
    if (m->used <= 1) {
        sp_int_digit l;
        sp_int_digit h;

        /* mu = (mp * DigitMask(a, i)) & WORD_MASK */
        mu = mp * a->dp[0];
        /* a += mu * m */
        l = a->dp[0];
        h = 0;
        SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[0]);
        a->dp[0] = l;
        l = h;
        h = 0;
        SP_ASM_ADDC(l, h, a->dp[1]);
        a->dp[1] = l;
        a->dp[2] = h;
        a->used = m->used * 2 + 1;
        /* Shift for step 3 is one full word for a single digit modulus. */
        bits = SP_WORD_SIZE;
    }
#ifndef WOLFSSL_HAVE_SP_ECC
#if SP_WORD_SIZE == 64
#if SP_INT_DIGITS >= 8
    /* Unrolled case for 4-digit modulus with top bit set (mask == 0). */
    else if ((m->used == 4) && (mask == 0)) {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o2;

        l = 0;
        h = 0;
        o = 0;
        o2 = 0;
        /* For i = 0..NumDigits(m)-1 */
        for (i = 0; i < 4; i++) {
            /* mu = (mp * DigitMask(a, i)) & WORD_MASK */
            mu = mp * a->dp[0];
            l = a->dp[0];
            /* a = (a + mu * m) >> WORD_SIZE - results are written one digit
             * down so the word shift happens in place. */
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[0]);
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[1]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[1]);
            a->dp[0] = l;
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[2]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[2]);
            a->dp[1] = l;
            l = h;
            h = o2;
            o2 = 0;
            SP_ASM_ADDC_REG(l, h, o);
            SP_ASM_ADDC(l, h, a->dp[i + 3]);
            SP_ASM_MUL_ADD(l, h, o2, mu, m->dp[3]);
            a->dp[2] = l;
            o = h;
            l = h;
            h = 0;
        }
        /* Handle overflow. */
        h = o2;
        SP_ASM_ADDC(l, h, a->dp[7]);
        a->dp[3] = l;
        a->dp[4] = h;
        a->used = 5;
        /* Remove leading zeros. */
        sp_clamp(a);
        /* a = a mod m */
        if (_sp_cmp_abs(a, m) != MP_LT) {
            sp_sub(a, m, a);
        }

        return MP_OKAY;
    }
#endif /* SP_INT_DIGITS >= 8 */
#if SP_INT_DIGITS >= 12
    /* Unrolled case for 6-digit modulus with top bit set (mask == 0). */
    else if ((m->used == 6) && (mask == 0)) {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o2;

        l = 0;
        h = 0;
        o = 0;
        o2 = 0;
        /* For i = 0..NumDigits(m)-1 */
        for (i = 0; i < 6; i++) {
            /* mu = (mp * DigitMask(a, i)) & WORD_MASK */
            mu = mp * a->dp[0];
            l = a->dp[0];
            /* a = (a + mu * m) >> WORD_SIZE - results are written one digit
             * down so the word shift happens in place. */
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[0]);
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[1]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[1]);
            a->dp[0] = l;
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[2]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[2]);
            a->dp[1] = l;
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[3]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[3]);
            a->dp[2] = l;
            l = h;
            h = 0;
            SP_ASM_ADDC(l, h, a->dp[4]);
            SP_ASM_MUL_ADD_NO(l, h, mu, m->dp[4]);
            a->dp[3] = l;
            l = h;
            h = o2;
            o2 = 0;
            SP_ASM_ADDC_REG(l, h, o);
            SP_ASM_ADDC(l, h, a->dp[i + 5]);
            SP_ASM_MUL_ADD(l, h, o2, mu, m->dp[5]);
            a->dp[4] = l;
            o = h;
            l = h;
            h = 0;
        }
        /* Handle overflow. */
        h = o2;
        SP_ASM_ADDC(l, h, a->dp[11]);
        a->dp[5] = l;
        a->dp[6] = h;
        a->used = 7;
        /* Remove leading zeros. */
        sp_clamp(a);
        /* a = a mod m */
        if (_sp_cmp_abs(a, m) != MP_LT) {
            sp_sub(a, m, a);
        }

        return MP_OKAY;
    }
#endif /* SP_INT_DIGITS >= 12 */
#elif SP_WORD_SIZE == 32
    /* In-place shifting case for small modulus with top bit set. */
    else if ((m->used <= 12) && (mask == 0)) {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o2;
        sp_int_digit* ad;
        const sp_int_digit* md;

        o = 0;
        o2 = 0;
        ad = a->dp;
        /* For i = 0..NumDigits(m)-1 */
        for (i = 0; i < m->used; i++) {
            md = m->dp;
            /* mu = (mp * DigitMask(a, i)) & WORD_MASK */
            mu = mp * ad[0];
            /* a = (a + mu * m, 0) >> WORD_SIZE - results are written one
             * digit down so the word shift happens in place. */
            l = ad[0];
            h = 0;
            SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
            l = h;
            /* Process two digits per iteration, alternating l/h roles. */
            for (j = 1; j + 1 < m->used - 1; j += 2) {
                h = 0;
                SP_ASM_ADDC(l, h, ad[j]);
                SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
                ad[j - 1] = l;
                l = 0;
                SP_ASM_ADDC(h, l, ad[j + 1]);
                SP_ASM_MUL_ADD_NO(h, l, mu, *(md++));
                ad[j] = h;
            }
            /* Finish any remaining single digit. */
            for (; j < m->used - 1; j++) {
                h = 0;
                SP_ASM_ADDC(l, h, ad[j]);
                SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
                ad[j - 1] = l;
                l = h;
            }
            h = o2;
            o2 = 0;
            SP_ASM_ADDC_REG(l, h, o);
            SP_ASM_ADDC(l, h, ad[i + j]);
            SP_ASM_MUL_ADD(l, h, o2, mu, *md);
            ad[j - 1] = l;
            o = h;
        }
        /* Handle overflow. */
        l = o;
        h = o2;
        SP_ASM_ADDC(l, h, a->dp[m->used * 2 - 1]);
        a->dp[m->used - 1] = l;
        a->dp[m->used] = h;
        a->used = m->used + 1;
        /* Remove leading zeros. */
        sp_clamp(a);
        /* a = a mod m */
        if (_sp_cmp_abs(a, m) != MP_LT) {
            sp_sub(a, m, a);
        }

        return MP_OKAY;
    }
#endif /* SP_WORD_SIZE == 64 | 32 */
#endif /* WOLFSSL_HAVE_SP_ECC */
    else {
        sp_int_digit l;
        sp_int_digit h;
        sp_int_digit o2;
        sp_int_digit* ad;
        const sp_int_digit* md;

        o = 0;
        o2 = 0;
        ad = a->dp;
        /* 2. For i = 0..NumDigits(m)-1 */
        for (i = 0; i < m->used; i++, ad++) {
            md = m->dp;
            /* 2.1. mu = (mp * DigitMask(a, i)) & WORD_MASK */
            mu = mp * ad[0];
            /* 2.2. If i == NumDigits(m)-1 and mask != 0 then mu & = mask */
            if ((i == m->used - 1) && (mask != 0)) {
                mu &= mask;
            }

            /* 2.3 a += mu * DigitMask(m, 0) */
            l = ad[0];
            h = 0;
            SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
            ad[0] = l;
            l = h;
            /* 2.4. For j = 1 up to NumDigits(m)-2, two digits at a time. */
            for (j = 1; j + 1 < m->used - 1; j += 2) {
                h = 0;
                /* 2.4.1. a += mu * DigitMask(m, j) */
                SP_ASM_ADDC(l, h, ad[j + 0]);
                SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
                ad[j + 0] = l;
                l = 0;
                /* 2.4.1. a += mu * DigitMask(m, j) */
                SP_ASM_ADDC(h, l, ad[j + 1]);
                SP_ASM_MUL_ADD_NO(h, l, mu, *(md++));
                ad[j + 1] = h;
            }
            /* Finish any remaining single digit. */
            for (; j < m->used - 1; j++) {
                h = 0;
                /* 2.4.1. a += mu * DigitMask(m, j) */
                SP_ASM_ADDC(l, h, ad[j]);
                SP_ASM_MUL_ADD_NO(l, h, mu, *(md++));
                ad[j] = l;
                l = h;
            }
            h = o2;
            o2 = 0;
            SP_ASM_ADDC_REG(l, h, o);
            /* 2.5 a += mu * DigitMask(m, NumDigits(m)-1) */
            SP_ASM_ADDC(l, h, ad[j]);
            SP_ASM_MUL_ADD(l, h, o2, mu, *md);
            ad[j] = l;
            o = h;
        }
        /* Handle overflow. */
        l = o;
        h = o2;
        SP_ASM_ADDC(l, h, a->dp[m->used * 2 - 1]);
        a->dp[m->used * 2 - 1] = l;
        a->dp[m->used * 2] = h;
        a->used = m->used * 2 + 1;
    }

    /* Remove leading zeros. */
    sp_clamp(a);
    /* 3. a >>= NumBits(m) */
    (void)sp_rshb(a, bits, a);
    /* 4. a = a mod m - one conditional subtraction is enough here. */
    if (_sp_cmp_abs(a, m) != MP_LT) {
        sp_sub(a, m, a);
    }

#if 0
    sp_print(a, "rr");
#endif
    return MP_OKAY;
#endif /* !SQR_MUL_ASM */
}
  15772. #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || \
  15773. (defined(WOLFSSL_SP_MATH_ALL) && defined(HAVE_ECC))
  15774. /* Reduce a number in Montgomery form.
  15775. *
  15776. * @param [in,out] a SP integer to Montgomery reduce.
  15777. * @param [in] m SP integer that is the modulus.
  15778. * @param [in] mp SP integer digit that is the bottom digit of inv(-m).
  15779. *
  15780. * @return MP_OKAY on success.
  15781. * @return MP_VAL when a or m is NULL or m is zero.
  15782. */
  15783. int sp_mont_red(sp_int* a, const sp_int* m, sp_int_digit mp)
  15784. {
  15785. int err;
  15786. /* Validate parameters. */
  15787. if ((a == NULL) || (m == NULL) || sp_iszero(m)) {
  15788. err = MP_VAL;
  15789. }
  15790. /* Ensure a has enough space for calculation. */
  15791. else if (a->size < m->used * 2 + 1) {
  15792. err = MP_VAL;
  15793. }
  15794. else {
  15795. /* Perform Montogomery Reduction. */
  15796. err = _sp_mont_red(a, m, mp);
  15797. }
  15798. return err;
  15799. }
  15800. #endif
  15801. /* Calculate the bottom digit of the inverse of negative m.
  15802. * (rho * m) mod 2^n = -1, where n is the number of bits in a digit.
  15803. *
  15804. * Used when performing Montgomery Reduction.
  15805. * m must be odd.
  15806. * Jeffrey Hurchalla’s method.
  15807. * https://arxiv.org/pdf/2204.04342.pdf
  15808. *
  15809. * @param [in] m SP integer that is the modulus.
 * @param [out] rho  SP integer digit that is the bottom digit of inv(-m).
  15811. */
  15812. static void _sp_mont_setup(const sp_int* m, sp_int_digit* rho)
  15813. {
  15814. sp_int_digit d = m->dp[0];
  15815. sp_int_digit x = (3 * d) ^ 2;
  15816. sp_int_digit y = 1 - d * x;
  15817. #if SP_WORD_SIZE >= 16
  15818. x *= 1 + y; y *= y;
  15819. #endif
  15820. #if SP_WORD_SIZE >= 32
  15821. x *= 1 + y; y *= y;
  15822. #endif
  15823. #if SP_WORD_SIZE >= 64
  15824. x *= 1 + y; y *= y;
  15825. #endif
  15826. x *= 1 + y;
  15827. /* rho = -1/m mod d, subtract x (unsigned) from 0, assign negative */
  15828. *rho = (sp_int_digit)((sp_int_digit)0 - (sp_int_sdigit)x);
  15829. }
  15830. /* Calculate the bottom digit of the inverse of negative m.
  15831. * (rho * m) mod 2^n = -1, where n is the number of bits in a digit.
  15832. *
  15833. * Used when performing Montgomery Reduction.
  15834. *
  15835. * @param [in] m SP integer that is the modulus.
 * @param [out] rho  SP integer digit that is the bottom digit of inv(-m).
  15837. *
  15838. * @return MP_OKAY on success.
  15839. * @return MP_VAL when m or rho is NULL.
  15840. */
  15841. int sp_mont_setup(const sp_int* m, sp_int_digit* rho)
  15842. {
  15843. int err = MP_OKAY;
  15844. /* Validate parameters. */
  15845. if ((m == NULL) || (rho == NULL)) {
  15846. err = MP_VAL;
  15847. }
  15848. /* Calculation only works with odd modulus. */
  15849. if ((err == MP_OKAY) && !sp_isodd(m)) {
  15850. err = MP_VAL;
  15851. }
  15852. if (err == MP_OKAY) {
  15853. /* Calculate negative of inverse mod 2^n. */
  15854. _sp_mont_setup(m, rho);
  15855. }
  15856. return err;
  15857. }
  15858. /* Calculate the normalization value of m.
  15859. * norm = 2^k - m, where k is the number of bits in m
  15860. *
  15861. * @param [out] norm SP integer that normalises numbers into Montgomery
  15862. * form.
  15863. * @param [in] m SP integer that is the modulus.
  15864. *
  15865. * @return MP_OKAY on success.
 * @return MP_VAL when norm or m is NULL, or number of bits in m is maximal.
  15867. */
  15868. int sp_mont_norm(sp_int* norm, const sp_int* m)
  15869. {
  15870. int err = MP_OKAY;
  15871. int bits = 0;
  15872. /* Validate parameters. */
  15873. if ((norm == NULL) || (m == NULL)) {
  15874. err = MP_VAL;
  15875. }
  15876. if (err == MP_OKAY) {
  15877. /* Find top bit and ensure norm has enough space. */
  15878. bits = sp_count_bits(m);
  15879. if (bits >= norm->size * SP_WORD_SIZE) {
  15880. err = MP_VAL;
  15881. }
  15882. }
  15883. if (err == MP_OKAY) {
  15884. /* Round up for case when m is less than a word - no advantage in using
  15885. * a smaller mask and would take more operations.
  15886. */
  15887. if (bits < SP_WORD_SIZE) {
  15888. bits = SP_WORD_SIZE;
  15889. }
  15890. /* Smallest number greater than m of form 2^n. */
  15891. _sp_zero(norm);
  15892. err = sp_set_bit(norm, bits);
  15893. }
  15894. if (err == MP_OKAY) {
  15895. /* norm = 2^n % m */
  15896. err = sp_sub(norm, m, norm);
  15897. }
  15898. if ((err == MP_OKAY) && (bits == SP_WORD_SIZE)) {
  15899. /* Sub made norm one word and now finish calculation. */
  15900. norm->dp[0] %= m->dp[0];
  15901. }
  15902. if (err == MP_OKAY) {
  15903. /* Remove leading zeros. */
  15904. sp_clamp(norm);
  15905. }
  15906. return err;
  15907. }
  15908. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_HAVE_SP_DH ||
  15909. * WOLFCRYPT_HAVE_ECCSI || WOLFCRYPT_HAVE_SAKKE */
  15910. /*********************************
  15911. * To and from binary and strings.
  15912. *********************************/
  15913. /* Calculate the number of 8-bit values required to represent the
  15914. * multi-precision number.
  15915. *
 * When a is NULL, returns 0.
  15917. *
  15918. * @param [in] a SP integer.
  15919. *
  15920. * @return The count of 8-bit values.
  15921. * @return 0 when a is NULL.
  15922. */
  15923. int sp_unsigned_bin_size(const sp_int* a)
  15924. {
  15925. int cnt = 0;
  15926. if (a != NULL) {
  15927. cnt = (sp_count_bits(a) + 7) / 8;
  15928. }
  15929. return cnt;
  15930. }
  15931. /* Convert a number as an array of bytes in big-endian format to a
  15932. * multi-precision number.
  15933. *
  15934. * @param [out] a SP integer.
  15935. * @param [in] in Array of bytes.
  15936. * @param [in] inSz Number of data bytes in array.
  15937. *
  15938. * @return MP_OKAY on success.
  15939. * @return MP_VAL when the number is too big to fit in an SP.
  15940. */
int sp_read_unsigned_bin(sp_int* a, const byte* in, word32 inSz)
{
    int err = MP_OKAY;

    /* Validate parameters. A NULL buffer is fine when no bytes are read. */
    if ((a == NULL) || ((in == NULL) && (inSz > 0))) {
        err = MP_VAL;
    }
    /* Check a has enough space for number. */
    if ((err == MP_OKAY) && (inSz > (word32)a->size * SP_WORD_SIZEOF)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* Load full digits at a time from in. */
        int i;
        int j = 0;

        /* Number of digits, rounding up for a partial final digit. */
        a->used = (inSz + SP_WORD_SIZEOF - 1) / SP_WORD_SIZEOF;

#if defined(BIG_ENDIAN_ORDER) && !defined(WOLFSSL_SP_INT_DIGIT_ALIGN)
        /* Data endian matches representation of number.
         * Directly copy if we don't have alignment issues.
         */
        for (i = inSz-1; i > SP_WORD_SIZEOF-1; i -= SP_WORD_SIZEOF) {
            a->dp[j++] = *(sp_int_digit*)(in + i - (SP_WORD_SIZEOF - 1));
        }
#else
        /* Construct digit from required number of bytes, least significant
         * byte of the input last. */
        for (i = inSz-1; i >= SP_WORD_SIZEOF - 1; i -= SP_WORD_SIZEOF) {
            a->dp[j]  = ((sp_int_digit)in[i - 0] <<  0);
#if SP_WORD_SIZE >= 16
            a->dp[j] |= ((sp_int_digit)in[i - 1] <<  8);
#endif
#if SP_WORD_SIZE >= 32
            a->dp[j] |= ((sp_int_digit)in[i - 2] << 16) |
                        ((sp_int_digit)in[i - 3] << 24);
#endif
#if SP_WORD_SIZE >= 64
            a->dp[j] |= ((sp_int_digit)in[i - 4] << 32) |
                        ((sp_int_digit)in[i - 5] << 40) |
                        ((sp_int_digit)in[i - 6] << 48) |
                        ((sp_int_digit)in[i - 7] << 56);
#endif
            j++;
        }
#endif

#if SP_WORD_SIZE >= 16
        /* Handle leftovers - fewer than SP_WORD_SIZEOF bytes remain. */
        if (i >= 0) {
#ifdef BIG_ENDIAN_ORDER
            int s;

            /* Place remaining bytes into last digit. */
            a->dp[a->used - 1] = 0;
            for (s = 0; i >= 0; i--,s += 8) {
                a->dp[j] |= ((sp_int_digit)in[i]) << s;
            }
#else
            /* Cast digits to an array of bytes so we can insert directly. */
            byte *d = (byte*)a->dp;

            /* Zero out all bytes in last digit. */
            a->dp[a->used - 1] = 0;
            /* Place remaining bytes directly into digit. */
            switch (i) {
#if SP_WORD_SIZE >= 64
                case 6: d[inSz - 1 - 6] = in[6]; FALL_THROUGH;
                case 5: d[inSz - 1 - 5] = in[5]; FALL_THROUGH;
                case 4: d[inSz - 1 - 4] = in[4]; FALL_THROUGH;
                case 3: d[inSz - 1 - 3] = in[3]; FALL_THROUGH;
#endif
#if SP_WORD_SIZE >= 32
                case 2: d[inSz - 1 - 2] = in[2]; FALL_THROUGH;
                case 1: d[inSz - 1 - 1] = in[1]; FALL_THROUGH;
#endif
                case 0: d[inSz - 1 - 0] = in[0];
            }
#endif /* LITTLE_ENDIAN_ORDER */
        }
#endif
        /* Remove leading zeros. */
        sp_clamp(a);
    }

    return err;
}
  16020. /* Convert the multi-precision number to an array of bytes in big-endian format.
  16021. *
  16022. * The array must be large enough for encoded number - use mp_unsigned_bin_size
  16023. * to calculate the number of bytes required.
  16024. *
  16025. * @param [in] a SP integer.
  16026. * @param [out] out Array to put encoding into.
  16027. *
  16028. * @return MP_OKAY on success.
  16029. * @return MP_VAL when a or out is NULL.
  16030. */
  16031. int sp_to_unsigned_bin(const sp_int* a, byte* out)
  16032. {
  16033. /* Write assuming output buffer is big enough. */
  16034. return sp_to_unsigned_bin_len(a, out, sp_unsigned_bin_size(a));
  16035. }
  16036. /* Convert the multi-precision number to an array of bytes in big-endian format.
  16037. *
  16038. * The array must be large enough for encoded number - use mp_unsigned_bin_size
  16039. * to calculate the number of bytes required.
  16040. * Front-pads the output array with zeros to make number the size of the array.
  16041. *
  16042. * @param [in] a SP integer.
  16043. * @param [out] out Array to put encoding into.
  16044. * @param [in] outSz Size of the array in bytes.
  16045. *
  16046. * @return MP_OKAY on success.
  16047. * @return MP_VAL when a or out is NULL.
  16048. */
  16049. int sp_to_unsigned_bin_len(const sp_int* a, byte* out, int outSz)
  16050. {
  16051. int err = MP_OKAY;
  16052. /* Validate parameters. */
  16053. if ((a == NULL) || (out == NULL)) {
  16054. err = MP_VAL;
  16055. }
  16056. if (err == MP_OKAY) {
  16057. /* Start at the end of the buffer - least significant byte. */
  16058. int j = outSz - 1;
  16059. if (!sp_iszero(a)) {
  16060. int i;
  16061. /* Put each digit in. */
  16062. for (i = 0; (j >= 0) && (i < a->used); i++) {
  16063. int b;
  16064. /* Place each byte of a digit into the buffer. */
  16065. for (b = 0; b < SP_WORD_SIZE; b += 8) {
  16066. out[j--] = (byte)(a->dp[i] >> b);
  16067. /* Stop if the output buffer is filled. */
  16068. if (j < 0) {
  16069. break;
  16070. }
  16071. }
  16072. }
  16073. }
  16074. /* Front pad buffer with 0s. */
  16075. for (; j >= 0; j--) {
  16076. out[j] = 0;
  16077. }
  16078. }
  16079. return err;
  16080. }
  16081. #if defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_RSA) && \
  16082. !defined(WOLFSSL_RSA_VERIFY_ONLY)
  16083. /* Store the number in big-endian format in array at an offset.
  16084. * The array must be large enough for encoded number - use mp_unsigned_bin_size
  16085. * to calculate the number of bytes required.
  16086. *
  16087. * @param [in] o Offset into array o start encoding.
  16088. * @param [in] a SP integer.
  16089. * @param [out] out Array to put encoding into.
  16090. *
  16091. * @return Index of next byte after data.
  16092. * @return MP_VAL when a or out is NULL.
  16093. */
  16094. int sp_to_unsigned_bin_at_pos(int o, const sp_int* a, unsigned char* out)
  16095. {
  16096. /* Get length of data that will be written. */
  16097. int len = sp_unsigned_bin_size(a);
  16098. /* Write number to buffer at offset. */
  16099. int ret = sp_to_unsigned_bin_len(a, out + o, len);
  16100. if (ret == MP_OKAY) {
  16101. /* Return offset of next byte after number. */
  16102. ret = o + len;
  16103. }
  16104. return ret;
  16105. }
  16106. #endif /* WOLFSSL_SP_MATH_ALL && !NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY */
  16107. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_RSA) && \
  16108. !defined(WOLFSSL_RSA_VERIFY_ONLY)) || defined(HAVE_ECC) || !defined(NO_DSA)
  16109. /* Convert hexadecimal number as string in big-endian format to a
  16110. * multi-precision number.
  16111. *
  16112. * Assumes negative sign and leading zeros have been stripped.
  16113. *
  16114. * @param [out] a SP integer.
  16115. * @param [in] in NUL terminated string.
  16116. *
  16117. * @return MP_OKAY on success.
  16118. * @return MP_VAL when radix not supported, value is negative, or a character
  16119. * is not valid.
  16120. */
  16121. static int _sp_read_radix_16(sp_int* a, const char* in)
  16122. {
  16123. int err = MP_OKAY;
  16124. int i;
  16125. int s = 0;
  16126. int j = 0;
  16127. sp_int_digit d;
  16128. /* Make all nibbles in digit 0. */
  16129. d = 0;
  16130. /* Step through string a character at a time starting at end - least
  16131. * significant byte. */
  16132. for (i = (int)(XSTRLEN(in) - 1); i >= 0; i--) {
  16133. /* Convert character from hex. */
  16134. int ch = (int)HexCharToByte(in[i]);
  16135. /* Check for invalid character. */
  16136. if (ch < 0) {
  16137. err = MP_VAL;
  16138. break;
  16139. }
  16140. /* Check whether we have filled the digit. */
  16141. if (s == SP_WORD_SIZE) {
  16142. /* Store digit and move index to next in a. */
  16143. a->dp[j++] = d;
  16144. /* Fail if we are out of space in a. */
  16145. if (j >= a->size) {
  16146. err = MP_VAL;
  16147. break;
  16148. }
  16149. /* Set shift back to 0 - lowest nibble. */
  16150. s = 0;
  16151. /* Make all nibbles in digit 0. */
  16152. d = 0;
  16153. }
  16154. /* Put next nibble into digit. */
  16155. d |= ((sp_int_digit)ch) << s;
  16156. /* Update shift for next nibble. */
  16157. s += 4;
  16158. }
  16159. if (err == MP_OKAY) {
  16160. /* If space, store last digit. */
  16161. if (j < a->size) {
  16162. a->dp[j] = d;
  16163. }
  16164. /* Update used count. */
  16165. a->used = j + 1;
  16166. /* Remove leading zeros. */
  16167. sp_clamp(a);
  16168. }
  16169. return err;
  16170. }
  16171. #endif /* (WOLFSSL_SP_MATH_ALL && !NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) ||
  16172. * HAVE_ECC || !NO_DSA */
  16173. #ifdef WOLFSSL_SP_READ_RADIX_10
  16174. /* Convert decimal number as string in big-endian format to a multi-precision
  16175. * number.
  16176. *
  16177. * Assumes negative sign and leading zeros have been stripped.
  16178. *
  16179. * @param [out] a SP integer.
  16180. * @param [in] in NUL terminated string.
  16181. *
  16182. * @return MP_OKAY on success.
  16183. * @return MP_VAL when radix not supported, value is negative, or a character
  16184. * is not valid.
  16185. */
  16186. static int _sp_read_radix_10(sp_int* a, const char* in)
  16187. {
  16188. int err = MP_OKAY;
  16189. int i;
  16190. char ch;
  16191. /* Start with a being zero. */
  16192. _sp_zero(a);
  16193. /* Process all characters. */
  16194. for (i = 0; in[i] != '\0'; i++) {
  16195. /* Get character. */
  16196. ch = in[i];
  16197. /* Check character is valid. */
  16198. if ((ch >= '0') && (ch <= '9')) {
  16199. /* Assume '0'..'9' are continuous valus as characters. */
  16200. ch -= '0';
  16201. }
  16202. else {
  16203. /* Return error on invalid character. */
  16204. err = MP_VAL;
  16205. break;
  16206. }
  16207. /* Multiply a by 10. */
  16208. err = _sp_mul_d(a, 10, a, 0);
  16209. if (err != MP_OKAY) {
  16210. break;
  16211. }
  16212. /* Add character value. */
  16213. err = _sp_add_d(a, ch, a);
  16214. if (err != MP_OKAY) {
  16215. break;
  16216. }
  16217. }
  16218. return err;
  16219. }
  16220. #endif /* WOLFSSL_SP_READ_RADIX_10 */
  16221. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(NO_RSA) && \
  16222. !defined(WOLFSSL_RSA_VERIFY_ONLY)) || defined(HAVE_ECC) || !defined(NO_DSA)
  16223. /* Convert a number as string in big-endian format to a big number.
  16224. * Only supports base-16 (hexadecimal) and base-10 (decimal).
  16225. *
  16226. * Negative values supported when WOLFSSL_SP_INT_NEGATIVE is defined.
  16227. *
  16228. * @param [out] a SP integer.
  16229. * @param [in] in NUL terminated string.
  16230. * @param [in] radix Number of values in a digit.
  16231. *
  16232. * @return MP_OKAY on success.
  16233. * @return MP_VAL when a or in is NULL, radix not supported, value is negative,
  16234. * or a character is not valid.
  16235. */
int sp_read_radix(sp_int* a, const char* in, int radix)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (in == NULL)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
    #ifndef WOLFSSL_SP_INT_NEGATIVE
        /* Negative values are not representable in this configuration. */
        if (*in == '-') {
            err = MP_VAL;
        }
        else
    #endif
        {
    #ifdef WOLFSSL_SP_INT_NEGATIVE
            if (*in == '-') {
                /* Make number negative if signed string. */
                a->sign = MP_NEG;
                /* Step past the sign so digit parsing sees only digits. */
                in++;
            }
    #endif /* WOLFSSL_SP_INT_NEGATIVE */
            /* Skip leading zeros. */
            while (*in == '0') {
                in++;
            }

            if (radix == 16) {
                err = _sp_read_radix_16(a, in);
            }
    #ifdef WOLFSSL_SP_READ_RADIX_10
            else if (radix == 10) {
                err = _sp_read_radix_10(a, in);
            }
    #endif
            else {
                /* Only base 16 (and optionally base 10) are supported. */
                err = MP_VAL;
            }

    #ifdef WOLFSSL_SP_INT_NEGATIVE
            /* Ensure not negative when zero: "-0" must parse as +0. */
            if ((err == MP_OKAY) && sp_iszero(a)) {
                a->sign = MP_ZPOS;
            }
    #endif
        }
    }

    return err;
}
  16282. #endif /* (WOLFSSL_SP_MATH_ALL && !NO_RSA && !WOLFSSL_RSA_VERIFY_ONLY) ||
  16283. * HAVE_ECC || !NO_DSA */
  16284. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  16285. defined(WC_MP_TO_RADIX)
  16286. /* Put the big-endian, hex string encoding of a into str.
  16287. *
  16288. * Assumes str is large enough for result.
  16289. * Use sp_radix_size() to calculate required length.
  16290. *
  16291. * @param [in] a SP integer to convert.
  16292. * @param [out] str String to hold hex string result.
  16293. *
  16294. * @return MP_OKAY on success.
  16295. * @return MP_VAL when a or str is NULL.
  16296. */
int sp_tohex(const sp_int* a, char* str)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (str == NULL)) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* Quick out if number is zero. */
        if (sp_iszero(a) == MP_YES) {
        #ifndef WC_DISABLE_RADIX_ZERO_PAD
            /* Make string represent complete bytes: "00" not "0". */
            *str++ = '0';
        #endif /* WC_DISABLE_RADIX_ZERO_PAD */
            *str++ = '0';
        }
        else {
            int i;
            int j;
            sp_int_digit d;

        #ifdef WOLFSSL_SP_INT_NEGATIVE
            if (a->sign == MP_NEG) {
                /* Add negative sign character. */
                *str = '-';
                str++;
            }
        #endif /* WOLFSSL_SP_INT_NEGATIVE */

            /* Start at last digit - most significant digit.
             * NOTE(review): relies on a being clamped (non-zero top word)
             * so the scans below terminate before i underflows - the zero
             * case was excluded above. */
            i = a->used - 1;
            d = a->dp[i];
        #ifndef WC_DISABLE_RADIX_ZERO_PAD
            /* Find highest non-zero byte in most-significant word. */
            for (j = SP_WORD_SIZE - 8; j >= 0; j -= 8) {
                /* When a byte at this index is not 0 break out to start
                 * writing.
                 */
                if (((d >> j) & 0xff) != 0) {
                    break;
                }
                /* Whole word was 0 - restart scan on the next word down. */
                if (j == 0) {
                    j = SP_WORD_SIZE - 8;
                    d = a->dp[--i];
                }
            }
            /* Start with high nibble of byte. */
            j += 4;
        #else
            /* Find highest non-zero nibble in most-significant word. */
            for (j = SP_WORD_SIZE - 4; j >= 0; j -= 4) {
                /* When a nibble at this index is not 0 break out to start
                 * writing.
                 */
                if (((d >> j) & 0xf) != 0) {
                    break;
                }
                /* Whole word was 0 - restart scan on the next word down. */
                if (j == 0) {
                    j = SP_WORD_SIZE - 4;
                    d = a->dp[--i];
                }
            }
        #endif /* WC_DISABLE_RADIX_ZERO_PAD */
            /* Write out as much as required from most-significant digit. */
            for (; j >= 0; j -= 4) {
                *(str++) = ByteToHex((byte)(d >> j));
            }
            /* Write rest of digits - all full words, no leading-zero trim. */
            for (--i; i >= 0; i--) {
                /* Get digit from memory. */
                d = a->dp[i];
                /* Write out all nibbles of digit. */
                for (j = SP_WORD_SIZE - 4; j >= 0; j -= 4) {
                    *(str++) = (byte)ByteToHex((byte)(d >> j));
                }
            }
        }
        /* Terminate string. */
        *str = '\0';
    }

    return err;
}
  16379. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) || WC_MP_TO_RADIX */
  16380. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  16381. defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) || \
  16382. defined(WC_MP_TO_RADIX)
  16383. /* Put the big-endian, decimal string encoding of a into str.
  16384. *
  16385. * Assumes str is large enough for result.
  16386. * Use sp_radix_size() to calculate required length.
  16387. *
  16388. * @param [in] a SP integer to convert.
  16389. * @param [out] str String to hold hex string result.
  16390. *
  16391. * @return MP_OKAY on success.
  16392. * @return MP_VAL when a or str is NULL.
  16393. * @return MP_MEM when dynamic memory allocation fails.
  16394. */
  16395. int sp_todecimal(const sp_int* a, char* str)
  16396. {
  16397. int err = MP_OKAY;
  16398. int i;
  16399. int j;
  16400. sp_int_digit d = 0;
  16401. /* Validate parameters. */
  16402. if ((a == NULL) || (str == NULL)) {
  16403. err = MP_VAL;
  16404. }
  16405. /* Quick out if number is zero. */
  16406. else if (sp_iszero(a) == MP_YES) {
  16407. *str++ = '0';
  16408. *str = '\0';
  16409. }
  16410. else {
  16411. /* Temporary that is divided by 10. */
  16412. DECL_SP_INT(t, a->used + 1);
  16413. ALLOC_SP_INT_SIZE(t, a->used + 1, err, NULL);
  16414. if (err == MP_OKAY) {
  16415. err = sp_copy(a, t);
  16416. }
  16417. if (err == MP_OKAY) {
  16418. #ifdef WOLFSSL_SP_INT_NEGATIVE
  16419. if (a->sign == MP_NEG) {
  16420. /* Add negative sign character. */
  16421. *str = '-';
  16422. str++;
  16423. }
  16424. #endif /* WOLFSSL_SP_INT_NEGATIVE */
  16425. /* Write out little endian. */
  16426. i = 0;
  16427. do {
  16428. /* Divide by 10 and get remainder of division. */
  16429. (void)sp_div_d(t, 10, t, &d);
  16430. /* Write out remainder as a character. */
  16431. str[i++] = (char)('0' + d);
  16432. }
  16433. /* Keep going while we there is a value to write. */
  16434. while (!sp_iszero(t));
  16435. /* Terminate string. */
  16436. str[i] = '\0';
  16437. if (err == MP_OKAY) {
  16438. /* Reverse string to big endian. */
  16439. for (j = 0; j <= (i - 1) / 2; j++) {
  16440. int c = (unsigned char)str[j];
  16441. str[j] = str[i - 1 - j];
  16442. str[i - 1 - j] = (char)c;
  16443. }
  16444. }
  16445. }
  16446. FREE_SP_INT(t, NULL);
  16447. }
  16448. return err;
  16449. }
  16450. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_KEY_GEN || HAVE_COMP_KEY */
  16451. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  16452. defined(WC_MP_TO_RADIX)
  16453. /* Put the string version, big-endian, of a in str using the given radix.
  16454. *
  16455. * @param [in] a SP integer to convert.
  16456. * @param [out] str String to hold hex string result.
  16457. * @param [in] radix Base of character.
  16458. * Valid values: MP_RADIX_HEX, MP_RADIX_DEC.
  16459. *
  16460. * @return MP_OKAY on success.
  16461. * @return MP_VAL when a or str is NULL, or radix not supported.
  16462. */
  16463. int sp_toradix(const sp_int* a, char* str, int radix)
  16464. {
  16465. int err = MP_OKAY;
  16466. /* Validate parameters. */
  16467. if ((a == NULL) || (str == NULL)) {
  16468. err = MP_VAL;
  16469. }
  16470. /* Handle base 16 if requested. */
  16471. else if (radix == MP_RADIX_HEX) {
  16472. err = sp_tohex(a, str);
  16473. }
  16474. #if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_KEY_GEN) || \
  16475. defined(HAVE_COMP_KEY)
  16476. /* Handle base 10 if requested. */
  16477. else if (radix == MP_RADIX_DEC) {
  16478. err = sp_todecimal(a, str);
  16479. }
  16480. #endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_KEY_GEN || HAVE_COMP_KEY */
  16481. else {
  16482. /* Base not supported. */
  16483. err = MP_VAL;
  16484. }
  16485. return err;
  16486. }
  16487. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) || WC_MP_TO_RADIX */
  16488. #if (defined(WOLFSSL_SP_MATH_ALL) && !defined(WOLFSSL_RSA_VERIFY_ONLY)) || \
  16489. defined(WC_MP_TO_RADIX)
  16490. /* Calculate the length of the string version, big-endian, of a using the given
  16491. * radix.
  16492. *
  16493. * @param [in] a SP integer to convert.
  16494. * @param [in] radix Base of character.
  16495. * Valid values: MP_RADIX_HEX, MP_RADIX_DEC.
  16496. * @param [out] size The number of characters in encoding.
  16497. *
  16498. * @return MP_OKAY on success.
  16499. * @return MP_VAL when a or size is NULL, or radix not supported.
  16500. */
int sp_radix_size(const sp_int* a, int radix, int* size)
{
    int err = MP_OKAY;

    /* Validate parameters. */
    if ((a == NULL) || (size == NULL)) {
        err = MP_VAL;
    }
    /* Handle base 16 if requested. */
    else if (radix == MP_RADIX_HEX) {
        if (a->used == 0) {
        #ifndef WC_DISABLE_RADIX_ZERO_PAD
            /* 00 and '\0' */
            *size = 2 + 1;
        #else
            /* Zero and '\0' */
            *size = 1 + 1;
        #endif /* WC_DISABLE_RADIX_ZERO_PAD */
        }
        else {
            /* Count of nibbles - round bit count up to nibble boundary. */
            int cnt = (sp_count_bits(a) + 3) / 4;
        #ifndef WC_DISABLE_RADIX_ZERO_PAD
            /* Must have even number of nibbles to have complete bytes. */
            if (cnt & 1) {
                cnt++;
            }
        #endif /* WC_DISABLE_RADIX_ZERO_PAD */
        #ifdef WOLFSSL_SP_INT_NEGATIVE
            /* Add to count of characters for negative sign. */
            if (a->sign == MP_NEG) {
                cnt++;
            }
        #endif /* WOLFSSL_SP_INT_NEGATIVE */
            /* One more for \0 */
            *size = cnt + 1;
        }
    }
#if defined(WOLFSSL_SP_MATH_ALL) || defined(WOLFSSL_KEY_GEN) || \
    defined(HAVE_COMP_KEY)
    /* Handle base 10 if requested. */
    else if (radix == MP_RADIX_DEC) {
        int i;
        sp_int_digit d;

        /* quick out if its zero */
        if (sp_iszero(a) == MP_YES) {
            /* Zero and '\0' */
            *size = 1 + 1;
        }
        else {
            DECL_SP_INT(t, a->used + 1);

            /* Temporary to be divided by 10. */
            ALLOC_SP_INT(t, a->used + 1, err, NULL);
            if (err == MP_OKAY) {
                t->size = a->used + 1;
                err = sp_copy(a, t);
            }

            if (err == MP_OKAY) {
                /* Count number of times number can be divided by 10 -
                 * i.e. the number of decimal digits.
                 * NOTE(review): sp_div_d result is ignored; with a valid t
                 * and divisor 10 it is not expected to fail, but a failure
                 * would leave t unchanged and spin - TODO confirm. */
                for (i = 0; !sp_iszero(t); i++) {
                    (void)sp_div_d(t, 10, t, &d);
                }
        #ifdef WOLFSSL_SP_INT_NEGATIVE
                /* Add to count of characters for negative sign. */
                if (a->sign == MP_NEG) {
                    i++;
                }
        #endif /* WOLFSSL_SP_INT_NEGATIVE */
                /* One more for \0 */
                *size = i + 1;
            }

            FREE_SP_INT(t, NULL);
        }
    }
#endif /* WOLFSSL_SP_MATH_ALL || WOLFSSL_KEY_GEN || HAVE_COMP_KEY */
    else {
        /* Base not supported. */
        err = MP_VAL;
    }

    return err;
}
  16581. #endif /* (WOLFSSL_SP_MATH_ALL && !WOLFSSL_RSA_VERIFY_ONLY) || WC_MP_TO_RADIX */
  16582. /***************************************
  16583. * Prime number generation and checking.
  16584. ***************************************/
  16585. #if defined(WOLFSSL_KEY_GEN) && (!defined(NO_RSA) || !defined(NO_DH) || \
  16586. !defined(NO_DSA)) && !defined(WC_NO_RNG)
  16587. #ifndef WOLFSSL_SP_MILLER_RABIN_CNT
  16588. /* Always done 8 iterations of Miller-Rabin on check of primality when
  16589. * generating.
  16590. */
  16591. #define WOLFSSL_SP_MILLER_RABIN_CNT 8
  16592. #endif
  16593. /* Generate a random prime for RSA only.
  16594. *
  16595. * @param [out] r SP integer to hold result.
  16596. * @param [in] len Number of bytes in prime.
  16597. * @param [in] rng Random number generator.
  16598. * @param [in] heap Heap hint. Unused.
  16599. *
  16600. * @return MP_OKAY on success
  16601. * @return MP_VAL when r or rng is NULL, length is not supported or random
  16602. * number generator fails.
  16603. */
int sp_rand_prime(sp_int* r, int len, WC_RNG* rng, void* heap)
{
    /* Low-bit pattern for Blum-Blum-Shub primes: 3 mod 4. */
    static const byte USE_BBS = 3;
    int err = MP_OKAY;
    byte low_bits = 1;
    int isPrime = MP_NO;
#if defined(WOLFSSL_SP_MATH_ALL) || defined(BIG_ENDIAN_ORDER)
    int bits = 0;
#endif /* WOLFSSL_SP_MATH_ALL */
    int digits = 0;

    (void)heap;

    /* Check NULL parameters and 0 is not prime so 0 bytes is invalid. */
    if ((r == NULL) || (rng == NULL) || (len == 0)) {
        err = MP_VAL;
    }

    if (err == MP_OKAY) {
        /* Get type: negative length requests a BBS (3 mod 4) prime. */
        if (len < 0) {
            low_bits = USE_BBS;
            len = -len;
        }

        /* Get number of digits required to handle required number of bytes. */
        digits = (len + SP_WORD_SIZEOF - 1) / SP_WORD_SIZEOF;
        /* Ensure result has space. */
        if (r->size < digits) {
            err = MP_VAL;
        }
    }

    if (err == MP_OKAY) {
    #ifndef WOLFSSL_SP_MATH_ALL
        /* For minimal maths, support only what's in SP and needed for DH. */
    #if defined(WOLFSSL_HAVE_SP_DH) && defined(WOLFSSL_KEY_GEN)
        if (len == 32) {
            /* 256-bit prime for DH. */
        }
        else
    #endif /* WOLFSSL_HAVE_SP_DH && WOLFSSL_KEY_GEN */
        /* Generate RSA primes that are half the modulus length. */
    #ifdef WOLFSSL_SP_4096
        if (len == 256) {
            /* Support 2048-bit operations compiled in. */
        }
        else
    #endif
    #ifndef WOLFSSL_SP_NO_3072
        if (len == 192) {
            /* Support 1536-bit operations compiled in. */
        }
        else
    #endif
    #ifndef WOLFSSL_SP_NO_2048
        if (len == 128) {
            /* Support 1024-bit operations compiled in. */
        }
        else
    #endif
        {
            /* Bit length not supported in SP. */
            err = MP_VAL;
        }
    #endif /* !WOLFSSL_SP_MATH_ALL */

    #ifdef WOLFSSL_SP_INT_NEGATIVE
        /* Generated number is always positive. */
        r->sign = MP_ZPOS;
    #endif /* WOLFSSL_SP_INT_NEGATIVE */
        /* Set number of digits that will be used. */
        r->used = digits;
    #if defined(WOLFSSL_SP_MATH_ALL) || defined(BIG_ENDIAN_ORDER)
        /* Calculate number of bits in last digit (0 when a full digit). */
        bits = (len * 8) & SP_WORD_MASK;
    #endif /* WOLFSSL_SP_MATH_ALL || BIG_ENDIAN_ORDER */
    }

    /* Assume the candidate is probably prime and then test until it is proven
     * composite.
     */
    while ((err == MP_OKAY) && (isPrime == MP_NO)) {
#ifdef SHOW_GEN
        printf(".");
        fflush(stdout);
#endif /* SHOW_GEN */
        /* Generate bytes into digit array. */
        err = wc_RNG_GenerateBlock(rng, (byte*)r->dp, len);
        if (err != 0) {
            err = MP_VAL;
            break;
        }

        /* Set top bits to ensure bit length required is generated.
         * Also set second top to help ensure product of two primes is
         * going to be twice the number of bits of each.
         */
#ifdef LITTLE_ENDIAN_ORDER
        ((byte*)r->dp)[len-1] |= 0x80 | 0x40;
#else
        ((byte*)(r->dp + r->used - 1))[0] |= 0x80 | 0x40;
#endif /* LITTLE_ENDIAN_ORDER */
        /* Set mandatory low bits
         *  - bottom bit to make odd.
         *  - For BBS, second lowest too to make Blum integer (3 mod 4).
         */
        r->dp[0] |= low_bits;

#ifdef BIG_ENDIAN_ORDER
        /* Bytes were put into wrong place when less than full digit. */
        if (bits != 0) {
            r->dp[r->used-1] >>= SP_WORD_SIZE - bits;
        }
#endif /* BIG_ENDIAN_ORDER */
#ifdef WOLFSSL_SP_MATH_ALL
        /* Mask top digit when less than a digit requested. */
        if (bits > 0) {
            r->dp[r->used - 1] &= ((sp_int_digit)1 << bits) - 1;
        }
#endif /* WOLFSSL_SP_MATH_ALL */

        /* Running Miller-Rabin up to 3 times gives us a 2^{-80} chance
         * of a 1024-bit candidate being a false positive, when it is our
         * prime candidate. (Note 4.49 of Handbook of Applied Cryptography.)
         * NOTE(review): return value ignored - a failing check leaves
         * isPrime as MP_NO so the loop retries with a fresh candidate.
         */
        sp_prime_is_prime_ex(r, WOLFSSL_SP_MILLER_RABIN_CNT, &isPrime, rng);
    }

    return err;
}
  16723. #endif /* WOLFSSL_KEY_GEN && (!NO_DH || !NO_DSA) && !WC_NO_RNG */
  16724. #ifdef WOLFSSL_SP_PRIME_GEN
  16725. /* Miller-Rabin test of "a" to the base of "b" as described in
  16726. * HAC pp. 139 Algorithm 4.24
  16727. *
  16728. * Sets result to 0 if definitely composite or 1 if probably prime.
  16729. * Randomly the chance of error is no more than 1/4 and often
  16730. * very much lower.
  16731. *
  16732. * a is assumed to be odd.
  16733. *
  16734. * @param [in] a SP integer to check.
  16735. * @param [in] b SP integer that is a small prime.
  16736. * @param [out] result MP_YES when number is likey prime.
  16737. * MP_NO otherwise.
  16738. * @param [in] n1 SP integer temporary.
  16739. * @param [in] r SP integer temporary.
  16740. *
  16741. * @return MP_OKAY on success.
  16742. * @return MP_MEM when dynamic memory allocation fails.
  16743. */
static int sp_prime_miller_rabin(const sp_int* a, sp_int* b, int* result,
    sp_int* n1, sp_int* r)
{
    int err = MP_OKAY;
    int s = 0;
    /* b doubles as working storage for y = b^r mod a. */
    sp_int* y = b;

    /* Assume not prime. */
    *result = MP_NO;

    /* Ensure small prime is 2 or more. */
    if (sp_cmp_d(b, 1) != MP_GT) {
        err = MP_VAL;
    }
    if (err == MP_OKAY) {
        /* n1 = a - 1 (a is assumed odd, so only the low digit changes
         * and no borrow can propagate). */
        (void)sp_copy(a, n1);
        n1->dp[0]--;

        /* Set 2**s * r = n1 */
        /* Count the number of least significant bits which are zero. */
        s = sp_cnt_lsb(n1);
        /* Divide n - 1 by 2**s into r. */
        (void)sp_rshb(n1, s, r);

        /* Compute y = b**r mod a */
        err = sp_exptmod(b, r, a, y);
    }
    if (err == MP_OKAY) {
        /* Assume probably prime until shown otherwise. */
        *result = MP_YES;

        /* If y != 1 and y != n1 do */
        if ((sp_cmp_d(y, 1) != MP_EQ) && (_sp_cmp(y, n1) != MP_EQ)) {
            int j = 1;
            /* While j <= s-1 and y != n1 */
            while ((j <= (s - 1)) && (_sp_cmp(y, n1) != MP_EQ)) {
                /* Square for bit shifted down. */
                err = sp_sqrmod(y, a, y);
                if (err != MP_OKAY) {
                    break;
                }
                /* If y == 1 then composite (non-trivial square root of 1). */
                if (sp_cmp_d(y, 1) == MP_EQ) {
                    *result = MP_NO;
                    break;
                }
                ++j;
            }
            /* If y != n1 then composite. */
            if ((*result == MP_YES) && (_sp_cmp(y, n1) != MP_EQ)) {
                *result = MP_NO;
            }
        }
    }

    return err;
}
#if SP_WORD_SIZE == 8
/* Number of pre-computed primes. First n primes - fitting in a digit. */
#define SP_PRIME_SIZE      54

/* The first 54 primes - all values that fit in an 8-bit digit. */
static const sp_int_digit sp_primes[SP_PRIME_SIZE] = {
    0x02, 0x03, 0x05, 0x07, 0x0B, 0x0D, 0x11, 0x13,
    0x17, 0x1D, 0x1F, 0x25, 0x29, 0x2B, 0x2F, 0x35,
    0x3B, 0x3D, 0x43, 0x47, 0x49, 0x4F, 0x53, 0x59,
    0x61, 0x65, 0x67, 0x6B, 0x6D, 0x71, 0x7F, 0x83,
    0x89, 0x8B, 0x95, 0x97, 0x9D, 0xA3, 0xA7, 0xAD,
    0xB3, 0xB5, 0xBF, 0xC1, 0xC5, 0xC7, 0xD3, 0xDF,
    0xE3, 0xE5, 0xE9, 0xEF, 0xF1, 0xFB
};
#else
/* Number of pre-computed primes. First n primes. */
#define SP_PRIME_SIZE      256

/* The first 256 primes. */
static const sp_uint16 sp_primes[SP_PRIME_SIZE] = {
    0x0002, 0x0003, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0013,
    0x0017, 0x001D, 0x001F, 0x0025, 0x0029, 0x002B, 0x002F, 0x0035,
    0x003B, 0x003D, 0x0043, 0x0047, 0x0049, 0x004F, 0x0053, 0x0059,
    0x0061, 0x0065, 0x0067, 0x006B, 0x006D, 0x0071, 0x007F, 0x0083,
    0x0089, 0x008B, 0x0095, 0x0097, 0x009D, 0x00A3, 0x00A7, 0x00AD,
    0x00B3, 0x00B5, 0x00BF, 0x00C1, 0x00C5, 0x00C7, 0x00D3, 0x00DF,
    0x00E3, 0x00E5, 0x00E9, 0x00EF, 0x00F1, 0x00FB, 0x0101, 0x0107,
    0x010D, 0x010F, 0x0115, 0x0119, 0x011B, 0x0125, 0x0133, 0x0137,
    0x0139, 0x013D, 0x014B, 0x0151, 0x015B, 0x015D, 0x0161, 0x0167,
    0x016F, 0x0175, 0x017B, 0x017F, 0x0185, 0x018D, 0x0191, 0x0199,
    0x01A3, 0x01A5, 0x01AF, 0x01B1, 0x01B7, 0x01BB, 0x01C1, 0x01C9,
    0x01CD, 0x01CF, 0x01D3, 0x01DF, 0x01E7, 0x01EB, 0x01F3, 0x01F7,
    0x01FD, 0x0209, 0x020B, 0x021D, 0x0223, 0x022D, 0x0233, 0x0239,
    0x023B, 0x0241, 0x024B, 0x0251, 0x0257, 0x0259, 0x025F, 0x0265,
    0x0269, 0x026B, 0x0277, 0x0281, 0x0283, 0x0287, 0x028D, 0x0293,
    0x0295, 0x02A1, 0x02A5, 0x02AB, 0x02B3, 0x02BD, 0x02C5, 0x02CF,
    0x02D7, 0x02DD, 0x02E3, 0x02E7, 0x02EF, 0x02F5, 0x02F9, 0x0301,
    0x0305, 0x0313, 0x031D, 0x0329, 0x032B, 0x0335, 0x0337, 0x033B,
    0x033D, 0x0347, 0x0355, 0x0359, 0x035B, 0x035F, 0x036D, 0x0371,
    0x0373, 0x0377, 0x038B, 0x038F, 0x0397, 0x03A1, 0x03A9, 0x03AD,
    0x03B3, 0x03B9, 0x03C7, 0x03CB, 0x03D1, 0x03D7, 0x03DF, 0x03E5,
    0x03F1, 0x03F5, 0x03FB, 0x03FD, 0x0407, 0x0409, 0x040F, 0x0419,
    0x041B, 0x0425, 0x0427, 0x042D, 0x043F, 0x0443, 0x0445, 0x0449,
    0x044F, 0x0455, 0x045D, 0x0463, 0x0469, 0x047F, 0x0481, 0x048B,
    0x0493, 0x049D, 0x04A3, 0x04A9, 0x04B1, 0x04BD, 0x04C1, 0x04C7,
    0x04CD, 0x04CF, 0x04D5, 0x04E1, 0x04EB, 0x04FD, 0x04FF, 0x0503,
    0x0509, 0x050B, 0x0511, 0x0515, 0x0517, 0x051B, 0x0527, 0x0529,
    0x052F, 0x0551, 0x0557, 0x055D, 0x0565, 0x0577, 0x0581, 0x058F,
    0x0593, 0x0595, 0x0599, 0x059F, 0x05A7, 0x05AB, 0x05AD, 0x05B3,
    0x05BF, 0x05C9, 0x05CB, 0x05CF, 0x05D1, 0x05D5, 0x05DB, 0x05E7,
    0x05F3, 0x05FB, 0x0607, 0x060D, 0x0611, 0x0617, 0x061F, 0x0623,
    0x062B, 0x062F, 0x063D, 0x0641, 0x0647, 0x0649, 0x064D, 0x0653
};
#endif

/* Compare the first n primes with a.
 *
 * @param [in]  a       Number to check.
 * @param [out] result  Whether number was found to be prime.
 * @return  0 when no small prime matches.
 * @return  1 when small prime matches.
 */
  16854. static WC_INLINE int sp_cmp_primes(const sp_int* a, int* result)
  16855. {
  16856. int i;
  16857. int haveRes = 0;
  16858. *result = MP_NO;
  16859. /* Check one digit a against primes table. */
  16860. for (i = 0; i < SP_PRIME_SIZE; i++) {
  16861. if (sp_cmp_d(a, sp_primes[i]) == MP_EQ) {
  16862. *result = MP_YES;
  16863. haveRes = 1;
  16864. break;
  16865. }
  16866. }
  16867. return haveRes;
  16868. }
  16869. /* Using composites is only faster when using 64-bit values. */
  16870. #if !defined(WOLFSSL_SP_SMALL) && (SP_WORD_SIZE == 64)
  16871. /* Number of composites. */
  16872. #define SP_COMP_CNT 38
  16873. /* Products of small primes that fit into 64-bits. */
  16874. static sp_int_digit sp_comp[SP_COMP_CNT] = {
  16875. 0x088886ffdb344692, 0x34091fa96ffdf47b, 0x3c47d8d728a77ebb,
  16876. 0x077ab7da9d709ea9, 0x310df3e7bd4bc897, 0xe657d7a1fd5161d1,
  16877. 0x02ad3dbe0cca85ff, 0x0787f9a02c3388a7, 0x1113c5cc6d101657,
  16878. 0x2456c94f936bdb15, 0x4236a30b85ffe139, 0x805437b38eada69d,
  16879. 0x00723e97bddcd2af, 0x00a5a792ee239667, 0x00e451352ebca269,
  16880. 0x013a7955f14b7805, 0x01d37cbd653b06ff, 0x0288fe4eca4d7cdf,
  16881. 0x039fddb60d3af63d, 0x04cd73f19080fb03, 0x0639c390b9313f05,
  16882. 0x08a1c420d25d388f, 0x0b4b5322977db499, 0x0e94c170a802ee29,
  16883. 0x11f6a0e8356100df, 0x166c8898f7b3d683, 0x1babda0a0afd724b,
  16884. 0x2471b07c44024abf, 0x2d866dbc2558ad71, 0x3891410d45fb47df,
  16885. 0x425d5866b049e263, 0x51f767298e2cf13b, 0x6d9f9ece5fc74f13,
  16886. 0x7f5ffdb0f56ee64d, 0x943740d46a1bc71f, 0xaf2d7ca25cec848f,
  16887. 0xcec010484e4ad877, 0xef972c3cfafbcd25
  16888. };
  16889. /* Index of next prime after those used to create composite. */
  16890. static int sp_comp_idx[SP_COMP_CNT] = {
  16891. 15, 25, 34, 42, 50, 58, 65, 72, 79, 86, 93, 100, 106, 112, 118,
  16892. 124, 130, 136, 142, 148, 154, 160, 166, 172, 178, 184, 190, 196, 202, 208,
  16893. 214, 220, 226, 232, 238, 244, 250, 256
  16894. };
  16895. #endif
/* Determines whether any of the first n small primes divide a evenly.
 *
 * @param [in]      a        Number to check.
 * @param [in, out] haveRes  Boolean indicating a no prime result found.
 * @param [in, out] result   Whether a is known to be prime.
 * @return  MP_OKAY on success.
 * @return  Negative on failure.
 */
static WC_INLINE int sp_div_primes(const sp_int* a, int* haveRes, int* result)
{
    int i;
#if !defined(WOLFSSL_SP_SMALL) && (SP_WORD_SIZE == 64)
    int j;
#endif
    sp_int_digit d;
    int err = MP_OKAY;

#if defined(WOLFSSL_SP_SMALL) || (SP_WORD_SIZE < 64)
    /* Do trial division of a with all known small primes. */
    for (i = 0; i < SP_PRIME_SIZE; i++) {
        /* Small prime divides a when remainder is 0. */
        err = sp_mod_d(a, (sp_int_digit)sp_primes[i], &d);
        if ((err != MP_OKAY) || (d == 0)) {
            *result = MP_NO;
            *haveRes = 1;
            break;
        }
    }
#else
    /* Faster variant: reduce a modulo a product of several small primes,
     * then trial-divide the single-word remainder instead of a itself. */
    /* Start with first prime in composite. */
    i = 0;
    for (j = 0; (!(*haveRes)) && (j < SP_COMP_CNT); j++) {
        /* Reduce a down to a single word. */
        err = sp_mod_d(a, sp_comp[j], &d);
        if ((err != MP_OKAY) || (d == 0)) {
            *result = MP_NO;
            *haveRes = 1;
            break;
        }
        /* Do trial division of d with small primes that make up composite.
         * i carries over between iterations: each composite covers the
         * primes from the previous index up to sp_comp_idx[j]. */
        for (; i < sp_comp_idx[j]; i++) {
            /* Small prime divides a when remainder is 0. */
            if (d % sp_primes[i] == 0) {
                *result = MP_NO;
                *haveRes = 1;
                break;
            }
        }
    }
#endif

    return err;
}
  16947. /* Check whether a is prime.
  16948. * Checks against a number of small primes and does t iterations of
  16949. * Miller-Rabin.
  16950. *
  16951. * @param [in] a SP integer to check.
  16952. * @param [in] trials Number of trials of Miller-Rabin test to perform.
  16953. * @param [out] result MP_YES when number is prime.
  16954. * MP_NO otherwise.
  16955. *
  16956. * @return MP_OKAY on success.
  16957. * @return MP_VAL when a or result is NULL, or trials is out of range.
  16958. * @return MP_MEM when dynamic memory allocation fails.
  16959. */
int sp_prime_is_prime(const sp_int* a, int trials, int* result)
{
    int err = MP_OKAY;
    int i;
    int haveRes = 0;

    /* Validate parameters. */
    if ((a == NULL) || (result == NULL)) {
        if (result != NULL) {
            *result = MP_NO;
        }
        err = MP_VAL;
    }

    /* Check validity of Miller-Rabin iterations count.
     * Must do at least one and need a unique pre-computed prime for each
     * iteration.
     */
    if ((err == MP_OKAY) && ((trials <= 0) || (trials > SP_PRIME_SIZE))) {
        *result = MP_NO;
        err = MP_VAL;
    }

    /* Short-cut, 1 is not prime. */
    if ((err == MP_OKAY) && sp_isone(a)) {
        *result = MP_NO;
        haveRes = 1;
    }

    SAVE_VECTOR_REGISTERS(err = _svr_ret;);

    /* Check against known small primes when a has 1 digit. */
    if ((err == MP_OKAY) && (!haveRes) && (a->used == 1) &&
            (a->dp[0] <= sp_primes[SP_PRIME_SIZE - 1])) {
        haveRes = sp_cmp_primes(a, result);
    }

    /* Check all small primes for even divisibility. */
    if ((err == MP_OKAY) && (!haveRes)) {
        err = sp_div_primes(a, &haveRes, result);
    }

    /* No definitive answer yet - run the requested Miller-Rabin trials
     * using the first `trials` small primes as bases. */
    if ((err == MP_OKAY) && (!haveRes)) {
        sp_int* n1;
        sp_int* r;
        DECL_SP_INT_ARRAY(t, a->used + 1, 2);
        DECL_SP_INT(b, a->used * 2 + 1);

        ALLOC_SP_INT_ARRAY(t, a->used + 1, 2, err, NULL);
        /* Allocate number that will hold modular exponentiation result. */
        ALLOC_SP_INT(b, a->used * 2 + 1, err, NULL);
        if (err == MP_OKAY) {
            n1 = t[0];
            r  = t[1];

            _sp_init_size(n1, a->used + 1);
            _sp_init_size(r, a->used + 1);
            _sp_init_size(b, a->used * 2 + 1);

            /* Do requested number of trials of Miller-Rabin test. */
            for (i = 0; i < trials; i++) {
                /* Miller-Rabin test with known small prime. */
                sp_set(b, sp_primes[i]);
                err = sp_prime_miller_rabin(a, b, result, n1, r);
                if ((err != MP_OKAY) || (*result == MP_NO)) {
                    break;
                }
            }

            /* Clear temporary values. */
            sp_clear(n1);
            sp_clear(r);
            sp_clear(b);
        }

        /* Free allocated temporary. */
        FREE_SP_INT(b, NULL);
        FREE_SP_INT_ARRAY(t, NULL);
    }

    RESTORE_VECTOR_REGISTERS();

    return err;
}
/* Check whether a is prime.
 * Checks against a number of small primes and does 'trials' iterations of
 * Miller-Rabin with random bases.
 *
 * @param  [in]   a       SP integer to check.
 * @param  [in]   trials  Number of iterations of Miller-Rabin test to perform.
 * @param  [out]  result  MP_YES when number is prime.
 *                        MP_NO otherwise.
 * @param  [in]   rng     Random number generator for Miller-Rabin testing.
 *
 * @return  MP_OKAY on success.
 * @return  MP_VAL when a, result or rng is NULL.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
int sp_prime_is_prime_ex(const sp_int* a, int trials, int* result, WC_RNG* rng)
{
    int err = MP_OKAY;
    /* Assume prime until proven composite or shown not prime. */
    int ret = MP_YES;
    /* Set non-zero once a definitive answer is known. */
    int haveRes = 0;
#ifndef WC_NO_RNG
    /* Miller-Rabin temporaries: b = random base, c = a - 2,
     * n1 and r = scratch passed to sp_prime_miller_rabin. */
    sp_int *b = NULL;
    sp_int *c = NULL;
    sp_int *n1 = NULL;
    sp_int *r = NULL;
#endif /* WC_NO_RNG */

    /* Validate parameters. */
    if ((a == NULL) || (result == NULL) || (rng == NULL)) {
        err = MP_VAL;
    }
    /* Reject a corrupted digit count. */
    if ((err == MP_OKAY) && (a->used < 0)) {
        err = MP_VAL;
    }
#ifdef WOLFSSL_SP_INT_NEGATIVE
    /* Primality testing here is only defined for positive integers. */
    if ((err == MP_OKAY) && (a->sign == MP_NEG)) {
        err = MP_VAL;
    }
#endif

    /* 1 is not prime. */
    if ((err == MP_OKAY) && sp_isone(a)) {
        ret = MP_NO;
        haveRes = 1;
    }

    SAVE_VECTOR_REGISTERS(err = _svr_ret;);

    /* Check against known small primes when a has 1 digit. */
    if ((err == MP_OKAY) && (!haveRes) && (a->used == 1) &&
        (a->dp[0] <= (sp_int_digit)sp_primes[SP_PRIME_SIZE - 1])) {
        haveRes = sp_cmp_primes(a, &ret);
    }

    /* Check all small primes for even divisibility. */
    if ((err == MP_OKAY) && (!haveRes)) {
        err = sp_div_primes(a, &haveRes, &ret);
    }

#ifndef WC_NO_RNG
    /* now do a miller rabin with up to t random numbers, this should
     * give a (1/4)^t chance of a false prime. */
    if ((err == MP_OKAY) && (!haveRes)) {
        int bits = sp_count_bits(a);
        /* Number of random bytes needed to cover every bit of a. */
        word32 baseSz = (bits + 7) / 8;
        DECL_SP_INT_ARRAY(ds, a->used + 1, 2);
        DECL_SP_INT_ARRAY(d, a->used * 2 + 1, 2);

        ALLOC_SP_INT_ARRAY(ds, a->used + 1, 2, err, NULL);
        ALLOC_SP_INT_ARRAY(d, a->used * 2 + 1, 2, err, NULL);
        if (err == MP_OKAY) {
            c = ds[0];
            n1 = ds[1];
            b = d[0];
            r = d[1];

            _sp_init_size(c , a->used + 1);
            _sp_init_size(n1, a->used + 1);
            _sp_init_size(b , a->used * 2 + 1);
            _sp_init_size(r , a->used * 2 + 1);

            /* c = a - 2: exclusive upper bound for usable random bases. */
            _sp_sub_d(a, 2, c);

            /* Bits used in the top digit of a (0 means a full digit). */
            bits &= SP_WORD_MASK;

            /* Keep trying random numbers until all trials complete. */
            while (trials > 0) {
                /* Generate random trial number. */
                err = wc_RNG_GenerateBlock(rng, (byte*)b->dp, baseSz);
                if (err != MP_OKAY) {
                    break;
                }
                b->used = a->used;

#ifdef BIG_ENDIAN_ORDER
                /* Fix top digit if fewer bytes than a full digit generated. */
                if (((baseSz * 8) & SP_WORD_MASK) != 0) {
                    b->dp[b->used-1] >>=
                        SP_WORD_SIZE - ((baseSz * 8) & SP_WORD_MASK);
                }
#endif /* BIG_ENDIAN_ORDER */

                /* Ensure the top word has no more bits than necessary. */
                if (bits > 0) {
                    b->dp[b->used - 1] &= ((sp_int_digit)1 << bits) - 1;
                    sp_clamp(b);
                }

                /* Can't use random value it is: 0, 1, a-2, a-1, >= a.
                 * Retry without consuming a trial. */
                if ((sp_cmp_d(b, 2) != MP_GT) || (_sp_cmp(b, c) != MP_LT)) {
                    continue;
                }

                /* Perform Miller-Rabin test with random value. */
                err = sp_prime_miller_rabin(a, b, &ret, n1, r);
                if ((err != MP_OKAY) || (ret == MP_NO)) {
                    break;
                }

                /* Trial complete. */
                trials--;
            }

            /* Zeroize temporary values used when generating private prime. */
            sp_forcezero(n1);
            sp_forcezero(r);
            sp_forcezero(b);
            sp_forcezero(c);
        }

        FREE_SP_INT_ARRAY(d, NULL);
        FREE_SP_INT_ARRAY(ds, NULL);
    }
#else
    (void)trials;
#endif /* !WC_NO_RNG */

    if (result != NULL) {
        *result = ret;
    }

    RESTORE_VECTOR_REGISTERS();

    return err;
}
  17151. #endif /* WOLFSSL_SP_PRIME_GEN */
  17152. #if !defined(NO_RSA) && defined(WOLFSSL_KEY_GEN)
/* Calculates the Greatest Common Divisor (GCD) of a and b into r.
 *
 * a and b are positive integers.
 *
 * Euclidean Algorithm:
 *  1. If a > b then a = b, b = a
 *  2. u = a
 *  3. v = b % a
 *  4. While v != 0
 *   4.1. t = u % v
 *   4.2. u <= v, v <= t, t <= u
 *  5. r = u
 *
 * @param  [in]   a  SP integer of first operand.
 * @param  [in]   b  SP integer of second operand.
 * @param  [out]  r  SP integer to hold result.
 *
 * @return  MP_OKAY on success.
 * @return  MP_MEM when dynamic memory allocation fails.
 */
static WC_INLINE int _sp_gcd(const sp_int* a, const sp_int* b, sp_int* r)
{
    int err = MP_OKAY;
    sp_int* u = NULL;
    sp_int* v = NULL;
    sp_int* t = NULL;
    /* Used for swapping sp_ints. */
    sp_int* s;
    /* Determine maximum digit length numbers will reach. */
    int used = (a->used >= b->used) ? a->used + 1 : b->used + 1;
    DECL_SP_INT_ARRAY(d, used, 3);

    SAVE_VECTOR_REGISTERS(err = _svr_ret;);

    ALLOC_SP_INT_ARRAY(d, used, 3, err, NULL);
    if (err == MP_OKAY) {
        u = d[0];
        v = d[1];
        t = d[2];
        _sp_init_size(u, used);
        _sp_init_size(v, used);
        _sp_init_size(t, used);

        /* 1. If a > b then a = b, b = a.
         *    Make a <= b by swapping the pointers (inputs not modified).
         */
        if (_sp_cmp(a, b) == MP_GT) {
            const sp_int* tmp;
            tmp = a;
            a = b;
            b = tmp;
        }

        /* 2. u = a */
        sp_copy(a, u);
        /* 3. v = b mod a */
        if (a->used == 1) {
            /* Single digit divisor: use the fast single-digit modulus. */
            err = sp_mod_d(b, a->dp[0], &v->dp[0]);
            v->used = (v->dp[0] != 0);
        }
        else {
            err = sp_mod(b, a, v);
        }
    }

    /* 4. While v != 0 */
    /* Keep reducing larger by smaller until smaller is 0 or u and v both one
     * digit.
     */
    while ((err == MP_OKAY) && (!sp_iszero(v)) && (u->used > 1)) {
        /* u' = v, v' = u mod v */
        /* 4.1 t = u mod v */
        if (v->used == 1) {
            err = sp_mod_d(u, v->dp[0], &t->dp[0]);
            t->used = (t->dp[0] != 0);
        }
        else {
            err = sp_mod(u, v, t);
        }
        /* 4.2. u <= v, v <= t, t <= u (rotate pointers - no copying) */
        s = u; u = v; v = t; t = s;
    }
    /* Only one digit remaining in u and v: finish with digit arithmetic. */
    while ((err == MP_OKAY) && (!sp_iszero(v))) {
        /* u' = v, v' = u mod v */
        /* 4.1 t = u mod v */
        t->dp[0] = u->dp[0] % v->dp[0];
        t->used = (t->dp[0] != 0);
        /* 4.2. u <= v, v <= t, t <= u */
        s = u; u = v; v = t; t = s;
    }
    if (err == MP_OKAY) {
        /* 5. r = u */
        err = sp_copy(u, r);
    }

    FREE_SP_INT_ARRAY(d, NULL);

    RESTORE_VECTOR_REGISTERS();

    return err;
}
  17247. /* Calculates the Greatest Common Denominator (GCD) of a and b into r.
  17248. *
  17249. * a and b are positive integers.
  17250. *
  17251. * @param [in] a SP integer of first operand.
  17252. * @param [in] b SP integer of second operand.
  17253. * @param [out] r SP integer to hold result.
  17254. *
  17255. * @return MP_OKAY on success.
  17256. * @return MP_VAL when a, b or r is NULL or too large.
  17257. * @return MP_MEM when dynamic memory allocation fails.
  17258. */
  17259. int sp_gcd(const sp_int* a, const sp_int* b, sp_int* r)
  17260. {
  17261. int err = MP_OKAY;
  17262. /* Validate parameters. */
  17263. if ((a == NULL) || (b == NULL) || (r == NULL)) {
  17264. err = MP_VAL;
  17265. }
  17266. /* Check that we have space in numbers to do work. */
  17267. else if ((a->used >= SP_INT_DIGITS) || (b->used >= SP_INT_DIGITS)) {
  17268. err = MP_VAL;
  17269. }
  17270. #ifdef WOLFSSL_SP_INT_NEGATIVE
  17271. else if ((a->sign == MP_NEG) || (b->sign >= MP_NEG)) {
  17272. err = MP_VAL;
  17273. }
  17274. #endif
  17275. else if (sp_iszero(a)) {
  17276. /* GCD of 0 and 0 is undefined - all integers divide 0. */
  17277. if (sp_iszero(b)) {
  17278. err = MP_VAL;
  17279. }
  17280. else {
  17281. /* GCD of 0 and b is b - b divides 0. */
  17282. err = sp_copy(b, r);
  17283. }
  17284. }
  17285. else if (sp_iszero(b)) {
  17286. /* GCD of 0 and a is a - a divides 0. */
  17287. err = sp_copy(a, r);
  17288. }
  17289. else {
  17290. /* Calculate GCD. */
  17291. err = _sp_gcd(a, b, r);
  17292. }
  17293. return err;
  17294. }
#endif /* !NO_RSA && WOLFSSL_KEY_GEN */
  17296. #if !defined(NO_RSA) && defined(WOLFSSL_KEY_GEN) && \
  17297. (!defined(WC_RSA_BLINDING) || defined(HAVE_FIPS) || defined(HAVE_SELFTEST))
  17298. /* Calculates the Lowest Common Multiple (LCM) of a and b and stores in r.
  17299. * Smallest number divisible by both numbers.
  17300. *
  17301. * a and b are positive integers.
  17302. *
  17303. * lcm(a, b) = (a / gcd(a, b)) * b
  17304. * Divide the common divisor from a and multiply by b.
  17305. *
  17306. * Algorithm:
  17307. * 1. t0 = gcd(a, b)
  17308. * 2. If a > b then
  17309. * 2.1. t1 = a / t0
  17310. * 2.2. r = b * t1
  17311. * 3. Else
  17312. * 3.1. t1 = b / t0
  17313. * 3.2. r = a * t1
  17314. *
  17315. * @param [in] a SP integer of first operand.
  17316. * @param [in] b SP integer of second operand.
  17317. * @param [out] r SP integer to hold result.
  17318. *
  17319. * @return MP_OKAY on success.
  17320. * @return MP_VAL when a, b or r is NULL; or a or b is zero.
  17321. * @return MP_MEM when dynamic memory allocation fails.
  17322. */
  17323. int sp_lcm(const sp_int* a, const sp_int* b, sp_int* r)
  17324. {
  17325. int err = MP_OKAY;
  17326. /* Determine maximum digit length numbers will reach. */
  17327. int used = ((a == NULL) || (b == NULL)) ? 1 :
  17328. (a->used >= b->used ? a->used + 1: b->used + 1);
  17329. DECL_SP_INT_ARRAY(t, used, 2);
  17330. /* Validate parameters. */
  17331. if ((a == NULL) || (b == NULL) || (r == NULL)) {
  17332. err = MP_VAL;
  17333. }
  17334. #ifdef WOLFSSL_SP_INT_NEGATIVE
  17335. /* Ensure a and b are positive. */
  17336. else if ((a->sign == MP_NEG) || (b->sign >= MP_NEG)) {
  17337. err = MP_VAL;
  17338. }
  17339. #endif
  17340. /* Ensure r has space for maximumal result. */
  17341. else if (r->size < a->used + b->used) {
  17342. err = MP_VAL;
  17343. }
  17344. /* LCM of 0 and any number is undefined as 0 is not in the set of values
  17345. * being used.
  17346. */
  17347. if ((err == MP_OKAY) && (mp_iszero(a) || mp_iszero(b))) {
  17348. err = MP_VAL;
  17349. }
  17350. ALLOC_SP_INT_ARRAY(t, used, 2, err, NULL);
  17351. if (err == MP_OKAY) {
  17352. _sp_init_size(t[0], used);
  17353. _sp_init_size(t[1], used);
  17354. SAVE_VECTOR_REGISTERS(err = _svr_ret;);
  17355. if (err == MP_OKAY) {
  17356. /* 1. t0 = gcd(a, b) */
  17357. err = sp_gcd(a, b, t[0]);
  17358. }
  17359. if (err == MP_OKAY) {
  17360. /* Divide the greater by the common divisor and multiply by other
  17361. * to operate on the smallest length numbers.
  17362. */
  17363. /* 2. If a > b then */
  17364. if (_sp_cmp_abs(a, b) == MP_GT) {
  17365. /* 2.1. t1 = a / t0 */
  17366. err = sp_div(a, t[0], t[1], NULL);
  17367. if (err == MP_OKAY) {
  17368. /* 2.2. r = b * t1 */
  17369. err = sp_mul(b, t[1], r);
  17370. }
  17371. }
  17372. /* 3. Else */
  17373. else {
  17374. /* 3.1. t1 = b / t0 */
  17375. err = sp_div(b, t[0], t[1], NULL);
  17376. if (err == MP_OKAY) {
  17377. /* 3.2. r = a * t1 */
  17378. err = sp_mul(a, t[1], r);
  17379. }
  17380. }
  17381. }
  17382. RESTORE_VECTOR_REGISTERS();
  17383. }
  17384. FREE_SP_INT_ARRAY(t, NULL);
  17385. return err;
  17386. }
#endif /* !NO_RSA && WOLFSSL_KEY_GEN && (!WC_RSA_BLINDING || HAVE_FIPS || HAVE_SELFTEST) */
  17388. /* Returns the run time settings.
  17389. *
  17390. * @return Settings value.
  17391. */
  17392. word32 CheckRunTimeSettings(void)
  17393. {
  17394. return CTC_SETTINGS;
  17395. }
  17396. /* Returns the fast math settings.
  17397. *
  17398. * @return Setting - number of bits in a digit.
  17399. */
  17400. word32 CheckRunTimeFastMath(void)
  17401. {
  17402. return SP_WORD_SIZE;
  17403. }
  17404. #ifdef WOLFSSL_CHECK_MEM_ZERO
  17405. /* Add an MP to check.
  17406. *
  17407. * @param [in] name Name of address to check.
  17408. * @param [in] sp sp_int that needs to be checked.
  17409. */
  17410. void sp_memzero_add(const char* name, sp_int* sp)
  17411. {
  17412. wc_MemZero_Add(name, sp->dp, sp->size * sizeof(sp_digit));
  17413. }
  17414. /* Check the memory in the data pointer for memory that must be zero.
  17415. *
  17416. * @param [in] sp sp_int that needs to be checked.
  17417. */
  17418. void sp_memzero_check(sp_int* sp)
  17419. {
  17420. wc_MemZero_Check(sp->dp, sp->size * sizeof(sp_digit));
  17421. }
  17422. #endif /* WOLFSSL_CHECK_MEM_ZERO */
  17423. #endif /* WOLFSSL_SP_MATH || WOLFSSL_SP_MATH_ALL */