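- # Ladder-step routine from the amd64-51 Curve25519 implementation, as
- # emitted by qhasm. Field elements mod p = 2^255 - 19 are stored in
- # radix 2^51: five 64-bit limbs of 51 bits each (see REDMASK51 = 2^51 - 1
- # and the *19 / *38 reduction multipliers below, which exploit
- # 2^255 = 19 mod p). The steps visible in this section appear to follow
- # the standard Montgomery ladder step:
- #   t1 = xp + zp
- #   t2 = xp - zp        (biased by 2p so limbs stay non-negative)
- #   t7 = t2^2
- #   t6 = t1^2
- # with the remaining ladder operations continuing past this excerpt.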
- # qhasm: int64 workp
- # qhasm: input workp
- # qhasm: int64 caller1
- # qhasm: int64 caller2
- # qhasm: int64 caller3
- # qhasm: int64 caller4
- # qhasm: int64 caller5
- # qhasm: int64 caller6
- # qhasm: int64 caller7
- # qhasm: caller caller1
- # qhasm: caller caller2
- # qhasm: caller caller3
- # qhasm: caller caller4
- # qhasm: caller caller5
- # qhasm: caller caller6
- # qhasm: caller caller7
- # qhasm: stack64 caller1_stack
- # qhasm: stack64 caller2_stack
- # qhasm: stack64 caller3_stack
- # qhasm: stack64 caller4_stack
- # qhasm: stack64 caller5_stack
- # qhasm: stack64 caller6_stack
- # qhasm: stack64 caller7_stack
- # qhasm: int64 t10
- # qhasm: int64 t11
- # qhasm: int64 t12
- # qhasm: int64 t13
- # qhasm: int64 t14
- # qhasm: stack64 t10_stack
- # qhasm: stack64 t11_stack
- # qhasm: stack64 t12_stack
- # qhasm: stack64 t13_stack
- # qhasm: stack64 t14_stack
- # qhasm: int64 t20
- # qhasm: int64 t21
- # qhasm: int64 t22
- # qhasm: int64 t23
- # qhasm: int64 t24
- # qhasm: stack64 t20_stack
- # qhasm: stack64 t21_stack
- # qhasm: stack64 t22_stack
- # qhasm: stack64 t23_stack
- # qhasm: stack64 t24_stack
- # qhasm: int64 t30
- # qhasm: int64 t31
- # qhasm: int64 t32
- # qhasm: int64 t33
- # qhasm: int64 t34
- # qhasm: stack64 t30_stack
- # qhasm: stack64 t31_stack
- # qhasm: stack64 t32_stack
- # qhasm: stack64 t33_stack
- # qhasm: stack64 t34_stack
- # qhasm: int64 t40
- # qhasm: int64 t41
- # qhasm: int64 t42
- # qhasm: int64 t43
- # qhasm: int64 t44
- # qhasm: stack64 t40_stack
- # qhasm: stack64 t41_stack
- # qhasm: stack64 t42_stack
- # qhasm: stack64 t43_stack
- # qhasm: stack64 t44_stack
- # qhasm: int64 t50
- # qhasm: int64 t51
- # qhasm: int64 t52
- # qhasm: int64 t53
- # qhasm: int64 t54
- # qhasm: stack64 t50_stack
- # qhasm: stack64 t51_stack
- # qhasm: stack64 t52_stack
- # qhasm: stack64 t53_stack
- # qhasm: stack64 t54_stack
- # qhasm: int64 t60
- # qhasm: int64 t61
- # qhasm: int64 t62
- # qhasm: int64 t63
- # qhasm: int64 t64
- # qhasm: stack64 t60_stack
- # qhasm: stack64 t61_stack
- # qhasm: stack64 t62_stack
- # qhasm: stack64 t63_stack
- # qhasm: stack64 t64_stack
- # qhasm: int64 t70
- # qhasm: int64 t71
- # qhasm: int64 t72
- # qhasm: int64 t73
- # qhasm: int64 t74
- # qhasm: stack64 t70_stack
- # qhasm: stack64 t71_stack
- # qhasm: stack64 t72_stack
- # qhasm: stack64 t73_stack
- # qhasm: stack64 t74_stack
- # qhasm: int64 t80
- # qhasm: int64 t81
- # qhasm: int64 t82
- # qhasm: int64 t83
- # qhasm: int64 t84
- # qhasm: stack64 t80_stack
- # qhasm: stack64 t81_stack
- # qhasm: stack64 t82_stack
- # qhasm: stack64 t83_stack
- # qhasm: stack64 t84_stack
- # qhasm: int64 t90
- # qhasm: int64 t91
- # qhasm: int64 t92
- # qhasm: int64 t93
- # qhasm: int64 t94
- # qhasm: stack64 t90_stack
- # qhasm: stack64 t91_stack
- # qhasm: stack64 t92_stack
- # qhasm: stack64 t93_stack
- # qhasm: stack64 t94_stack
- # qhasm: int64 xp0
- # qhasm: int64 xp1
- # qhasm: int64 xp2
- # qhasm: int64 xp3
- # qhasm: int64 xp4
- # qhasm: int64 zp0
- # qhasm: int64 zp1
- # qhasm: int64 zp2
- # qhasm: int64 zp3
- # qhasm: int64 zp4
- # qhasm: int64 xq0
- # qhasm: int64 xq1
- # qhasm: int64 xq2
- # qhasm: int64 xq3
- # qhasm: int64 xq4
- # qhasm: int64 zq0
- # qhasm: int64 zq1
- # qhasm: int64 zq2
- # qhasm: int64 zq3
- # qhasm: int64 zq4
- # qhasm: int64 mulr01
- # qhasm: int64 mulr11
- # qhasm: int64 mulr21
- # qhasm: int64 mulr31
- # qhasm: int64 mulr41
- # qhasm: int64 mulrax
- # qhasm: int64 mulrdx
- # qhasm: int64 mult
- # qhasm: int64 mulredmask
- # qhasm: stack64 mulx219_stack
- # qhasm: stack64 mulx319_stack
- # qhasm: stack64 mulx419_stack
- # qhasm: int64 squarer01
- # qhasm: int64 squarer11
- # qhasm: int64 squarer21
- # qhasm: int64 squarer31
- # qhasm: int64 squarer41
- # qhasm: int64 squarerax
- # qhasm: int64 squarerdx
- # qhasm: int64 squaret
- # qhasm: int64 squareredmask
- # qhasm: int64 mul121666rax
- # qhasm: int64 mul121666rdx
- # qhasm: enter crypto_scalarmult_curve25519_amd64_51_ladderstep
- .text
- .p2align 5
- .globl _crypto_scalarmult_curve25519_amd64_51_ladderstep
- .globl crypto_scalarmult_curve25519_amd64_51_ladderstep
- _crypto_scalarmult_curve25519_amd64_51_ladderstep:
- crypto_scalarmult_curve25519_amd64_51_ladderstep:
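- # Prologue: round %rsp down to a 32-byte boundary and reserve 352 bytes
- # of scratch space (352 is a multiple of 32, so alignment is preserved).
- # %r11, which now holds the total adjustment, is spilled to 0(%rsp) so
- # %rsp can be restored on exit; the remaining slots at 8(%rsp)..48(%rsp)
- # hold the callee-saved registers %r12..%r15, %rbx, %rbp.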
- mov %rsp,%r11
- and $31,%r11
- add $352,%r11
- sub %r11,%rsp
- # qhasm: caller1_stack = caller1
- # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
- # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
- movq %r11,0(%rsp)
- # qhasm: caller2_stack = caller2
- # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
- # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
- movq %r12,8(%rsp)
- # qhasm: caller3_stack = caller3
- # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
- # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
- movq %r13,16(%rsp)
- # qhasm: caller4_stack = caller4
- # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
- # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
- movq %r14,24(%rsp)
- # qhasm: caller5_stack = caller5
- # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
- # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
- movq %r15,32(%rsp)
- # qhasm: caller6_stack = caller6
- # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
- # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
- movq %rbx,40(%rsp)
- # qhasm: caller7_stack = caller7
- # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
- # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
- movq %rbp,48(%rsp)
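- # Load the five limbs at workp+40..workp+72 (apparently xp), duplicate
- # them, then form t1 = xp + zp and t2 = xp - zp using the limbs at
- # workp+80..workp+112 (apparently zp). t2 is biased by 2p first (the
- # constants 2P0 and 2P1234 hold 2*p in radix 2^51) so the limbwise
- # subtraction cannot underflow. Both results are then spilled to the
- # scratch frame at 56(%rsp)..128(%rsp).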
- # qhasm: t10 = *(uint64 *)(workp + 40)
- # asm 1: movq 40(<workp=int64#1),>t10=int64#2
- # asm 2: movq 40(<workp=%rdi),>t10=%rsi
- movq 40(%rdi),%rsi
- # qhasm: t11 = *(uint64 *)(workp + 48)
- # asm 1: movq 48(<workp=int64#1),>t11=int64#3
- # asm 2: movq 48(<workp=%rdi),>t11=%rdx
- movq 48(%rdi),%rdx
- # qhasm: t12 = *(uint64 *)(workp + 56)
- # asm 1: movq 56(<workp=int64#1),>t12=int64#4
- # asm 2: movq 56(<workp=%rdi),>t12=%rcx
- movq 56(%rdi),%rcx
- # qhasm: t13 = *(uint64 *)(workp + 64)
- # asm 1: movq 64(<workp=int64#1),>t13=int64#5
- # asm 2: movq 64(<workp=%rdi),>t13=%r8
- movq 64(%rdi),%r8
- # qhasm: t14 = *(uint64 *)(workp + 72)
- # asm 1: movq 72(<workp=int64#1),>t14=int64#6
- # asm 2: movq 72(<workp=%rdi),>t14=%r9
- movq 72(%rdi),%r9
- # qhasm: t20 = t10
- # asm 1: mov <t10=int64#2,>t20=int64#7
- # asm 2: mov <t10=%rsi,>t20=%rax
- mov %rsi,%rax
- # qhasm: t21 = t11
- # asm 1: mov <t11=int64#3,>t21=int64#8
- # asm 2: mov <t11=%rdx,>t21=%r10
- mov %rdx,%r10
- # qhasm: t22 = t12
- # asm 1: mov <t12=int64#4,>t22=int64#9
- # asm 2: mov <t12=%rcx,>t22=%r11
- mov %rcx,%r11
- # qhasm: t23 = t13
- # asm 1: mov <t13=int64#5,>t23=int64#10
- # asm 2: mov <t13=%r8,>t23=%r12
- mov %r8,%r12
- # qhasm: t24 = t14
- # asm 1: mov <t14=int64#6,>t24=int64#11
- # asm 2: mov <t14=%r9,>t24=%r13
- mov %r9,%r13
- # qhasm: t20 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P0
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P0,<t20=int64#7
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P0,<t20=%rax
- add crypto_scalarmult_curve25519_amd64_51_2P0,%rax
- # qhasm: t21 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t21=int64#8
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t21=%r10
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r10
- # qhasm: t22 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t22=int64#9
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t22=%r11
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r11
- # qhasm: t23 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t23=int64#10
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t23=%r12
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r12
- # qhasm: t24 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t24=int64#11
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t24=%r13
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r13
- # qhasm: t10 += *(uint64 *)(workp + 80)
- # asm 1: addq 80(<workp=int64#1),<t10=int64#2
- # asm 2: addq 80(<workp=%rdi),<t10=%rsi
- addq 80(%rdi),%rsi
- # qhasm: t11 += *(uint64 *)(workp + 88)
- # asm 1: addq 88(<workp=int64#1),<t11=int64#3
- # asm 2: addq 88(<workp=%rdi),<t11=%rdx
- addq 88(%rdi),%rdx
- # qhasm: t12 += *(uint64 *)(workp + 96)
- # asm 1: addq 96(<workp=int64#1),<t12=int64#4
- # asm 2: addq 96(<workp=%rdi),<t12=%rcx
- addq 96(%rdi),%rcx
- # qhasm: t13 += *(uint64 *)(workp + 104)
- # asm 1: addq 104(<workp=int64#1),<t13=int64#5
- # asm 2: addq 104(<workp=%rdi),<t13=%r8
- addq 104(%rdi),%r8
- # qhasm: t14 += *(uint64 *)(workp + 112)
- # asm 1: addq 112(<workp=int64#1),<t14=int64#6
- # asm 2: addq 112(<workp=%rdi),<t14=%r9
- addq 112(%rdi),%r9
- # qhasm: t20 -= *(uint64 *)(workp + 80)
- # asm 1: subq 80(<workp=int64#1),<t20=int64#7
- # asm 2: subq 80(<workp=%rdi),<t20=%rax
- subq 80(%rdi),%rax
- # qhasm: t21 -= *(uint64 *)(workp + 88)
- # asm 1: subq 88(<workp=int64#1),<t21=int64#8
- # asm 2: subq 88(<workp=%rdi),<t21=%r10
- subq 88(%rdi),%r10
- # qhasm: t22 -= *(uint64 *)(workp + 96)
- # asm 1: subq 96(<workp=int64#1),<t22=int64#9
- # asm 2: subq 96(<workp=%rdi),<t22=%r11
- subq 96(%rdi),%r11
- # qhasm: t23 -= *(uint64 *)(workp + 104)
- # asm 1: subq 104(<workp=int64#1),<t23=int64#10
- # asm 2: subq 104(<workp=%rdi),<t23=%r12
- subq 104(%rdi),%r12
- # qhasm: t24 -= *(uint64 *)(workp + 112)
- # asm 1: subq 112(<workp=int64#1),<t24=int64#11
- # asm 2: subq 112(<workp=%rdi),<t24=%r13
- subq 112(%rdi),%r13
- # qhasm: t10_stack = t10
- # asm 1: movq <t10=int64#2,>t10_stack=stack64#8
- # asm 2: movq <t10=%rsi,>t10_stack=56(%rsp)
- movq %rsi,56(%rsp)
- # qhasm: t11_stack = t11
- # asm 1: movq <t11=int64#3,>t11_stack=stack64#9
- # asm 2: movq <t11=%rdx,>t11_stack=64(%rsp)
- movq %rdx,64(%rsp)
- # qhasm: t12_stack = t12
- # asm 1: movq <t12=int64#4,>t12_stack=stack64#10
- # asm 2: movq <t12=%rcx,>t12_stack=72(%rsp)
- movq %rcx,72(%rsp)
- # qhasm: t13_stack = t13
- # asm 1: movq <t13=int64#5,>t13_stack=stack64#11
- # asm 2: movq <t13=%r8,>t13_stack=80(%rsp)
- movq %r8,80(%rsp)
- # qhasm: t14_stack = t14
- # asm 1: movq <t14=int64#6,>t14_stack=stack64#12
- # asm 2: movq <t14=%r9,>t14_stack=88(%rsp)
- movq %r9,88(%rsp)
- # qhasm: t20_stack = t20
- # asm 1: movq <t20=int64#7,>t20_stack=stack64#13
- # asm 2: movq <t20=%rax,>t20_stack=96(%rsp)
- movq %rax,96(%rsp)
- # qhasm: t21_stack = t21
- # asm 1: movq <t21=int64#8,>t21_stack=stack64#14
- # asm 2: movq <t21=%r10,>t21_stack=104(%rsp)
- movq %r10,104(%rsp)
- # qhasm: t22_stack = t22
- # asm 1: movq <t22=int64#9,>t22_stack=stack64#15
- # asm 2: movq <t22=%r11,>t22_stack=112(%rsp)
- movq %r11,112(%rsp)
- # qhasm: t23_stack = t23
- # asm 1: movq <t23=int64#10,>t23_stack=stack64#16
- # asm 2: movq <t23=%r12,>t23_stack=120(%rsp)
- movq %r12,120(%rsp)
- # qhasm: t24_stack = t24
- # asm 1: movq <t24=int64#11,>t24_stack=stack64#17
- # asm 2: movq <t24=%r13,>t24_stack=128(%rsp)
- movq %r13,128(%rsp)
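- # t7 = t2^2: schoolbook squaring in radix 2^51. Each mulq produces a
- # 128-bit partial product in rdx:rax; cross terms t2i*t2j (i < j) are
- # pre-doubled with shl $1. Products that would land at limb 5 or above
- # are folded back using 2^255 = 19 mod p, hence the imulq by 19 (for
- # diagonal terms) and by 38 = 2*19 (for doubled cross terms).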
- # qhasm: squarerax = t20_stack
- # asm 1: movq <t20_stack=stack64#13,>squarerax=int64#7
- # asm 2: movq <t20_stack=96(%rsp),>squarerax=%rax
- movq 96(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: t70 = squarerax
- # asm 1: mov <squarerax=int64#7,>t70=int64#2
- # asm 2: mov <squarerax=%rax,>t70=%rsi
- mov %rax,%rsi
- # qhasm: squarer01 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer01=int64#4
- # asm 2: mov <squarerdx=%rdx,>squarer01=%rcx
- mov %rdx,%rcx
- # qhasm: squarerax = t20_stack
- # asm 1: movq <t20_stack=stack64#13,>squarerax=int64#7
- # asm 2: movq <t20_stack=96(%rsp),>squarerax=%rax
- movq 96(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: t71 = squarerax
- # asm 1: mov <squarerax=int64#7,>t71=int64#5
- # asm 2: mov <squarerax=%rax,>t71=%r8
- mov %rax,%r8
- # qhasm: squarer11 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer11=int64#6
- # asm 2: mov <squarerdx=%rdx,>squarer11=%r9
- mov %rdx,%r9
- # qhasm: squarerax = t20_stack
- # asm 1: movq <t20_stack=stack64#13,>squarerax=int64#7
- # asm 2: movq <t20_stack=96(%rsp),>squarerax=%rax
- movq 96(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: t72 = squarerax
- # asm 1: mov <squarerax=int64#7,>t72=int64#8
- # asm 2: mov <squarerax=%rax,>t72=%r10
- mov %rax,%r10
- # qhasm: squarer21 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer21=int64#9
- # asm 2: mov <squarerdx=%rdx,>squarer21=%r11
- mov %rdx,%r11
- # qhasm: squarerax = t20_stack
- # asm 1: movq <t20_stack=stack64#13,>squarerax=int64#7
- # asm 2: movq <t20_stack=96(%rsp),>squarerax=%rax
- movq 96(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: t73 = squarerax
- # asm 1: mov <squarerax=int64#7,>t73=int64#10
- # asm 2: mov <squarerax=%rax,>t73=%r12
- mov %rax,%r12
- # qhasm: squarer31 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer31=int64#11
- # asm 2: mov <squarerdx=%rdx,>squarer31=%r13
- mov %rdx,%r13
- # qhasm: squarerax = t20_stack
- # asm 1: movq <t20_stack=stack64#13,>squarerax=int64#7
- # asm 2: movq <t20_stack=96(%rsp),>squarerax=%rax
- movq 96(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: t74 = squarerax
- # asm 1: mov <squarerax=int64#7,>t74=int64#12
- # asm 2: mov <squarerax=%rax,>t74=%r14
- mov %rax,%r14
- # qhasm: squarer41 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer41=int64#13
- # asm 2: mov <squarerdx=%rdx,>squarer41=%r15
- mov %rdx,%r15
- # qhasm: squarerax = t21_stack
- # asm 1: movq <t21_stack=stack64#14,>squarerax=int64#7
- # asm 2: movq <t21_stack=104(%rsp),>squarerax=%rax
- movq 104(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: carry? t72 += squarerax
- # asm 1: add <squarerax=int64#7,<t72=int64#8
- # asm 2: add <squarerax=%rax,<t72=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = t21_stack
- # asm 1: movq <t21_stack=stack64#14,>squarerax=int64#7
- # asm 2: movq <t21_stack=104(%rsp),>squarerax=%rax
- movq 104(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: carry? t73 += squarerax
- # asm 1: add <squarerax=int64#7,<t73=int64#10
- # asm 2: add <squarerax=%rax,<t73=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
- # qhasm: squarerax = t21_stack
- # asm 1: movq <t21_stack=stack64#14,>squarerax=int64#7
- # asm 2: movq <t21_stack=104(%rsp),>squarerax=%rax
- movq 104(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t74 += squarerax
- # asm 1: add <squarerax=int64#7,<t74=int64#12
- # asm 2: add <squarerax=%rax,<t74=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = t21_stack
- # asm 1: movq <t21_stack=stack64#14,>squarerax=int64#3
- # asm 2: movq <t21_stack=104(%rsp),>squarerax=%rdx
- movq 104(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t70 += squarerax
- # asm 1: add <squarerax=int64#7,<t70=int64#2
- # asm 2: add <squarerax=%rax,<t70=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = t22_stack
- # asm 1: movq <t22_stack=stack64#15,>squarerax=int64#7
- # asm 2: movq <t22_stack=112(%rsp),>squarerax=%rax
- movq 112(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: carry? t74 += squarerax
- # asm 1: add <squarerax=int64#7,<t74=int64#12
- # asm 2: add <squarerax=%rax,<t74=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = t22_stack
- # asm 1: movq <t22_stack=stack64#15,>squarerax=int64#3
- # asm 2: movq <t22_stack=112(%rsp),>squarerax=%rdx
- movq 112(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t70 += squarerax
- # asm 1: add <squarerax=int64#7,<t70=int64#2
- # asm 2: add <squarerax=%rax,<t70=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = t22_stack
- # asm 1: movq <t22_stack=stack64#15,>squarerax=int64#3
- # asm 2: movq <t22_stack=112(%rsp),>squarerax=%rdx
- movq 112(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t71 += squarerax
- # asm 1: add <squarerax=int64#7,<t71=int64#5
- # asm 2: add <squarerax=%rax,<t71=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = t23_stack
- # asm 1: movq <t23_stack=stack64#16,>squarerax=int64#3
- # asm 2: movq <t23_stack=120(%rsp),>squarerax=%rdx
- movq 120(%rsp),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t71 += squarerax
- # asm 1: add <squarerax=int64#7,<t71=int64#5
- # asm 2: add <squarerax=%rax,<t71=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = t23_stack
- # asm 1: movq <t23_stack=stack64#16,>squarerax=int64#3
- # asm 2: movq <t23_stack=120(%rsp),>squarerax=%rdx
- movq 120(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t72 += squarerax
- # asm 1: add <squarerax=int64#7,<t72=int64#8
- # asm 2: add <squarerax=%rax,<t72=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = t24_stack
- # asm 1: movq <t24_stack=stack64#17,>squarerax=int64#3
- # asm 2: movq <t24_stack=128(%rsp),>squarerax=%rdx
- movq 128(%rsp),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t73 += squarerax
- # asm 1: add <squarerax=int64#7,<t73=int64#10
- # asm 2: add <squarerax=%rax,<t73=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
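- # Carry and reduce: each accumulated limb is a 128-bit pair (t7i low,
- # squareri1 high), split at bit 51. shld $13 (13 = 64 - 51) moves the
- # bits above position 51 into the high register; the low word is masked
- # with REDMASK51 = 2^51 - 1, and the high part is added into the next
- # limb. The overflow out of limb 4 wraps around to limb 0 multiplied by
- # 19, again via 2^255 = 19 mod p. A final sequential carry pass leaves
- # each limb within roughly 52 bits.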
- # qhasm: squareredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: squarer01 = (squarer01.t70) << 13
- # asm 1: shld $13,<t70=int64#2,<squarer01=int64#4
- # asm 2: shld $13,<t70=%rsi,<squarer01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: t70 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t70=int64#2
- # asm 2: and <squareredmask=%rdx,<t70=%rsi
- and %rdx,%rsi
- # qhasm: squarer11 = (squarer11.t71) << 13
- # asm 1: shld $13,<t71=int64#5,<squarer11=int64#6
- # asm 2: shld $13,<t71=%r8,<squarer11=%r9
- shld $13,%r8,%r9
- # qhasm: t71 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t71=int64#5
- # asm 2: and <squareredmask=%rdx,<t71=%r8
- and %rdx,%r8
- # qhasm: t71 += squarer01
- # asm 1: add <squarer01=int64#4,<t71=int64#5
- # asm 2: add <squarer01=%rcx,<t71=%r8
- add %rcx,%r8
- # qhasm: squarer21 = (squarer21.t72) << 13
- # asm 1: shld $13,<t72=int64#8,<squarer21=int64#9
- # asm 2: shld $13,<t72=%r10,<squarer21=%r11
- shld $13,%r10,%r11
- # qhasm: t72 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t72=int64#8
- # asm 2: and <squareredmask=%rdx,<t72=%r10
- and %rdx,%r10
- # qhasm: t72 += squarer11
- # asm 1: add <squarer11=int64#6,<t72=int64#8
- # asm 2: add <squarer11=%r9,<t72=%r10
- add %r9,%r10
- # qhasm: squarer31 = (squarer31.t73) << 13
- # asm 1: shld $13,<t73=int64#10,<squarer31=int64#11
- # asm 2: shld $13,<t73=%r12,<squarer31=%r13
- shld $13,%r12,%r13
- # qhasm: t73 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t73=int64#10
- # asm 2: and <squareredmask=%rdx,<t73=%r12
- and %rdx,%r12
- # qhasm: t73 += squarer21
- # asm 1: add <squarer21=int64#9,<t73=int64#10
- # asm 2: add <squarer21=%r11,<t73=%r12
- add %r11,%r12
- # qhasm: squarer41 = (squarer41.t74) << 13
- # asm 1: shld $13,<t74=int64#12,<squarer41=int64#13
- # asm 2: shld $13,<t74=%r14,<squarer41=%r15
- shld $13,%r14,%r15
- # qhasm: t74 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t74=int64#12
- # asm 2: and <squareredmask=%rdx,<t74=%r14
- and %rdx,%r14
- # qhasm: t74 += squarer31
- # asm 1: add <squarer31=int64#11,<t74=int64#12
- # asm 2: add <squarer31=%r13,<t74=%r14
- add %r13,%r14
- # qhasm: squarer41 = squarer41 * 19
- # asm 1: imulq $19,<squarer41=int64#13,>squarer41=int64#4
- # asm 2: imulq $19,<squarer41=%r15,>squarer41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: t70 += squarer41
- # asm 1: add <squarer41=int64#4,<t70=int64#2
- # asm 2: add <squarer41=%rcx,<t70=%rsi
- add %rcx,%rsi
- # qhasm: squaret = t70
- # asm 1: mov <t70=int64#2,>squaret=int64#4
- # asm 2: mov <t70=%rsi,>squaret=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t71
- # asm 1: add <t71=int64#5,<squaret=int64#4
- # asm 2: add <t71=%r8,<squaret=%rcx
- add %r8,%rcx
- # qhasm: t70 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t70=int64#2
- # asm 2: and <squareredmask=%rdx,<t70=%rsi
- and %rdx,%rsi
- # qhasm: t71 = squaret
- # asm 1: mov <squaret=int64#4,>t71=int64#5
- # asm 2: mov <squaret=%rcx,>t71=%r8
- mov %rcx,%r8
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t72
- # asm 1: add <t72=int64#8,<squaret=int64#4
- # asm 2: add <t72=%r10,<squaret=%rcx
- add %r10,%rcx
- # qhasm: t71 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t71=int64#5
- # asm 2: and <squareredmask=%rdx,<t71=%r8
- and %rdx,%r8
- # qhasm: t72 = squaret
- # asm 1: mov <squaret=int64#4,>t72=int64#6
- # asm 2: mov <squaret=%rcx,>t72=%r9
- mov %rcx,%r9
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t73
- # asm 1: add <t73=int64#10,<squaret=int64#4
- # asm 2: add <t73=%r12,<squaret=%rcx
- add %r12,%rcx
- # qhasm: t72 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t72=int64#6
- # asm 2: and <squareredmask=%rdx,<t72=%r9
- and %rdx,%r9
- # qhasm: t73 = squaret
- # asm 1: mov <squaret=int64#4,>t73=int64#7
- # asm 2: mov <squaret=%rcx,>t73=%rax
- mov %rcx,%rax
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t74
- # asm 1: add <t74=int64#12,<squaret=int64#4
- # asm 2: add <t74=%r14,<squaret=%rcx
- add %r14,%rcx
- # qhasm: t73 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t73=int64#7
- # asm 2: and <squareredmask=%rdx,<t73=%rax
- and %rdx,%rax
- # qhasm: t74 = squaret
- # asm 1: mov <squaret=int64#4,>t74=int64#8
- # asm 2: mov <squaret=%rcx,>t74=%r10
- mov %rcx,%r10
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret *= 19
- # asm 1: imulq $19,<squaret=int64#4,>squaret=int64#4
- # asm 2: imulq $19,<squaret=%rcx,>squaret=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: t70 += squaret
- # asm 1: add <squaret=int64#4,<t70=int64#2
- # asm 2: add <squaret=%rcx,<t70=%rsi
- add %rcx,%rsi
- # qhasm: t74 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t74=int64#8
- # asm 2: and <squareredmask=%rdx,<t74=%r10
- and %rdx,%r10
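- # t7 = t2^2 is now reduced; its five limbs are spilled to
- # 136(%rsp)..168(%rsp) for later use.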
- # qhasm: t70_stack = t70
- # asm 1: movq <t70=int64#2,>t70_stack=stack64#18
- # asm 2: movq <t70=%rsi,>t70_stack=136(%rsp)
- movq %rsi,136(%rsp)
- # qhasm: t71_stack = t71
- # asm 1: movq <t71=int64#5,>t71_stack=stack64#19
- # asm 2: movq <t71=%r8,>t71_stack=144(%rsp)
- movq %r8,144(%rsp)
- # qhasm: t72_stack = t72
- # asm 1: movq <t72=int64#6,>t72_stack=stack64#20
- # asm 2: movq <t72=%r9,>t72_stack=152(%rsp)
- movq %r9,152(%rsp)
- # qhasm: t73_stack = t73
- # asm 1: movq <t73=int64#7,>t73_stack=stack64#21
- # asm 2: movq <t73=%rax,>t73_stack=160(%rsp)
- movq %rax,160(%rsp)
- # qhasm: t74_stack = t74
- # asm 1: movq <t74=int64#8,>t74_stack=stack64#22
- # asm 2: movq <t74=%r10,>t74_stack=168(%rsp)
- movq %r10,168(%rsp)
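- # t6 = t1^2: the same squaring-and-reduce pattern as above, this time
- # reading the t1 limbs from 56(%rsp)..88(%rsp) and accumulating into
- # t60..t64.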
- # qhasm: squarerax = t10_stack
- # asm 1: movq <t10_stack=stack64#8,>squarerax=int64#7
- # asm 2: movq <t10_stack=56(%rsp),>squarerax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: t60 = squarerax
- # asm 1: mov <squarerax=int64#7,>t60=int64#2
- # asm 2: mov <squarerax=%rax,>t60=%rsi
- mov %rax,%rsi
- # qhasm: squarer01 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer01=int64#4
- # asm 2: mov <squarerdx=%rdx,>squarer01=%rcx
- mov %rdx,%rcx
- # qhasm: squarerax = t10_stack
- # asm 1: movq <t10_stack=stack64#8,>squarerax=int64#7
- # asm 2: movq <t10_stack=56(%rsp),>squarerax=%rax
- movq 56(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: t61 = squarerax
- # asm 1: mov <squarerax=int64#7,>t61=int64#5
- # asm 2: mov <squarerax=%rax,>t61=%r8
- mov %rax,%r8
- # qhasm: squarer11 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer11=int64#6
- # asm 2: mov <squarerdx=%rdx,>squarer11=%r9
- mov %rdx,%r9
- # qhasm: squarerax = t10_stack
- # asm 1: movq <t10_stack=stack64#8,>squarerax=int64#7
- # asm 2: movq <t10_stack=56(%rsp),>squarerax=%rax
- movq 56(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: t62 = squarerax
- # asm 1: mov <squarerax=int64#7,>t62=int64#8
- # asm 2: mov <squarerax=%rax,>t62=%r10
- mov %rax,%r10
- # qhasm: squarer21 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer21=int64#9
- # asm 2: mov <squarerdx=%rdx,>squarer21=%r11
- mov %rdx,%r11
- # qhasm: squarerax = t10_stack
- # asm 1: movq <t10_stack=stack64#8,>squarerax=int64#7
- # asm 2: movq <t10_stack=56(%rsp),>squarerax=%rax
- movq 56(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: t63 = squarerax
- # asm 1: mov <squarerax=int64#7,>t63=int64#10
- # asm 2: mov <squarerax=%rax,>t63=%r12
- mov %rax,%r12
- # qhasm: squarer31 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer31=int64#11
- # asm 2: mov <squarerdx=%rdx,>squarer31=%r13
- mov %rdx,%r13
- # qhasm: squarerax = t10_stack
- # asm 1: movq <t10_stack=stack64#8,>squarerax=int64#7
- # asm 2: movq <t10_stack=56(%rsp),>squarerax=%rax
- movq 56(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: t64 = squarerax
- # asm 1: mov <squarerax=int64#7,>t64=int64#12
- # asm 2: mov <squarerax=%rax,>t64=%r14
- mov %rax,%r14
- # qhasm: squarer41 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer41=int64#13
- # asm 2: mov <squarerdx=%rdx,>squarer41=%r15
- mov %rdx,%r15
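- # note: each output column is a 128-bit accumulator split across a
- # (low, high) register pair: mulq leaves a product in rdx:rax, and the
- # remaining partial products are folded in below with add/adc.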
- # qhasm: squarerax = t11_stack
- # asm 1: movq <t11_stack=stack64#9,>squarerax=int64#7
- # asm 2: movq <t11_stack=64(%rsp),>squarerax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: carry? t62 += squarerax
- # asm 1: add <squarerax=int64#7,<t62=int64#8
- # asm 2: add <squarerax=%rax,<t62=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = t11_stack
- # asm 1: movq <t11_stack=stack64#9,>squarerax=int64#7
- # asm 2: movq <t11_stack=64(%rsp),>squarerax=%rax
- movq 64(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: carry? t63 += squarerax
- # asm 1: add <squarerax=int64#7,<t63=int64#10
- # asm 2: add <squarerax=%rax,<t63=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
- # qhasm: squarerax = t11_stack
- # asm 1: movq <t11_stack=stack64#9,>squarerax=int64#7
- # asm 2: movq <t11_stack=64(%rsp),>squarerax=%rax
- movq 64(%rsp),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t64 += squarerax
- # asm 1: add <squarerax=int64#7,<t64=int64#12
- # asm 2: add <squarerax=%rax,<t64=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = t11_stack
- # asm 1: movq <t11_stack=stack64#9,>squarerax=int64#3
- # asm 2: movq <t11_stack=64(%rsp),>squarerax=%rdx
- movq 64(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t60 += squarerax
- # asm 1: add <squarerax=int64#7,<t60=int64#2
- # asm 2: add <squarerax=%rax,<t60=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
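- # note: the constant 38 is 2*19: in a square the cross term t11*t14
- # occurs twice, and its column (weight 2^255) wraps with a factor of 19;
- # single-limb squares such as t13*t13 further down only pick up the 19.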
- # qhasm: squarerax = t12_stack
- # asm 1: movq <t12_stack=stack64#10,>squarerax=int64#7
- # asm 2: movq <t12_stack=72(%rsp),>squarerax=%rax
- movq 72(%rsp),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: carry? t64 += squarerax
- # asm 1: add <squarerax=int64#7,<t64=int64#12
- # asm 2: add <squarerax=%rax,<t64=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = t12_stack
- # asm 1: movq <t12_stack=stack64#10,>squarerax=int64#3
- # asm 2: movq <t12_stack=72(%rsp),>squarerax=%rdx
- movq 72(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t60 += squarerax
- # asm 1: add <squarerax=int64#7,<t60=int64#2
- # asm 2: add <squarerax=%rax,<t60=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = t12_stack
- # asm 1: movq <t12_stack=stack64#10,>squarerax=int64#3
- # asm 2: movq <t12_stack=72(%rsp),>squarerax=%rdx
- movq 72(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t61 += squarerax
- # asm 1: add <squarerax=int64#7,<t61=int64#5
- # asm 2: add <squarerax=%rax,<t61=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = t13_stack
- # asm 1: movq <t13_stack=stack64#11,>squarerax=int64#3
- # asm 2: movq <t13_stack=80(%rsp),>squarerax=%rdx
- movq 80(%rsp),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t61 += squarerax
- # asm 1: add <squarerax=int64#7,<t61=int64#5
- # asm 2: add <squarerax=%rax,<t61=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = t13_stack
- # asm 1: movq <t13_stack=stack64#11,>squarerax=int64#3
- # asm 2: movq <t13_stack=80(%rsp),>squarerax=%rdx
- movq 80(%rsp),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t62 += squarerax
- # asm 1: add <squarerax=int64#7,<t62=int64#8
- # asm 2: add <squarerax=%rax,<t62=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = t14_stack
- # asm 1: movq <t14_stack=stack64#12,>squarerax=int64#3
- # asm 2: movq <t14_stack=88(%rsp),>squarerax=%rdx
- movq 88(%rsp),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t63 += squarerax
- # asm 1: add <squarerax=int64#7,<t63=int64#10
- # asm 2: add <squarerax=%rax,<t63=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
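- # note: reduction of the five 128-bit columns starts here: shld $13
- # computes floor(column / 2^51) from each high:low pair (64 - 51 = 13),
- # the low halves are masked with REDMASK51, the carries are chained
- # upward, and the carry out of the top column re-enters limb 0 times 19.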
- # qhasm: squareredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: squarer01 = (squarer01.t60) << 13
- # asm 1: shld $13,<t60=int64#2,<squarer01=int64#4
- # asm 2: shld $13,<t60=%rsi,<squarer01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: t60 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t60=int64#2
- # asm 2: and <squareredmask=%rdx,<t60=%rsi
- and %rdx,%rsi
- # qhasm: squarer11 = (squarer11.t61) << 13
- # asm 1: shld $13,<t61=int64#5,<squarer11=int64#6
- # asm 2: shld $13,<t61=%r8,<squarer11=%r9
- shld $13,%r8,%r9
- # qhasm: t61 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t61=int64#5
- # asm 2: and <squareredmask=%rdx,<t61=%r8
- and %rdx,%r8
- # qhasm: t61 += squarer01
- # asm 1: add <squarer01=int64#4,<t61=int64#5
- # asm 2: add <squarer01=%rcx,<t61=%r8
- add %rcx,%r8
- # qhasm: squarer21 = (squarer21.t62) << 13
- # asm 1: shld $13,<t62=int64#8,<squarer21=int64#9
- # asm 2: shld $13,<t62=%r10,<squarer21=%r11
- shld $13,%r10,%r11
- # qhasm: t62 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t62=int64#8
- # asm 2: and <squareredmask=%rdx,<t62=%r10
- and %rdx,%r10
- # qhasm: t62 += squarer11
- # asm 1: add <squarer11=int64#6,<t62=int64#8
- # asm 2: add <squarer11=%r9,<t62=%r10
- add %r9,%r10
- # qhasm: squarer31 = (squarer31.t63) << 13
- # asm 1: shld $13,<t63=int64#10,<squarer31=int64#11
- # asm 2: shld $13,<t63=%r12,<squarer31=%r13
- shld $13,%r12,%r13
- # qhasm: t63 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t63=int64#10
- # asm 2: and <squareredmask=%rdx,<t63=%r12
- and %rdx,%r12
- # qhasm: t63 += squarer21
- # asm 1: add <squarer21=int64#9,<t63=int64#10
- # asm 2: add <squarer21=%r11,<t63=%r12
- add %r11,%r12
- # qhasm: squarer41 = (squarer41.t64) << 13
- # asm 1: shld $13,<t64=int64#12,<squarer41=int64#13
- # asm 2: shld $13,<t64=%r14,<squarer41=%r15
- shld $13,%r14,%r15
- # qhasm: t64 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t64=int64#12
- # asm 2: and <squareredmask=%rdx,<t64=%r14
- and %rdx,%r14
- # qhasm: t64 += squarer31
- # asm 1: add <squarer31=int64#11,<t64=int64#12
- # asm 2: add <squarer31=%r13,<t64=%r14
- add %r13,%r14
- # qhasm: squarer41 = squarer41 * 19
- # asm 1: imulq $19,<squarer41=int64#13,>squarer41=int64#4
- # asm 2: imulq $19,<squarer41=%r15,>squarer41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: t60 += squarer41
- # asm 1: add <squarer41=int64#4,<t60=int64#2
- # asm 2: add <squarer41=%rcx,<t60=%rsi
- add %rcx,%rsi
- # qhasm: squaret = t60
- # asm 1: mov <t60=int64#2,>squaret=int64#4
- # asm 2: mov <t60=%rsi,>squaret=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t61
- # asm 1: add <t61=int64#5,<squaret=int64#4
- # asm 2: add <t61=%r8,<squaret=%rcx
- add %r8,%rcx
- # qhasm: t60 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t60=int64#2
- # asm 2: and <squareredmask=%rdx,<t60=%rsi
- and %rdx,%rsi
- # qhasm: t61 = squaret
- # asm 1: mov <squaret=int64#4,>t61=int64#5
- # asm 2: mov <squaret=%rcx,>t61=%r8
- mov %rcx,%r8
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t62
- # asm 1: add <t62=int64#8,<squaret=int64#4
- # asm 2: add <t62=%r10,<squaret=%rcx
- add %r10,%rcx
- # qhasm: t61 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t61=int64#5
- # asm 2: and <squareredmask=%rdx,<t61=%r8
- and %rdx,%r8
- # qhasm: t62 = squaret
- # asm 1: mov <squaret=int64#4,>t62=int64#6
- # asm 2: mov <squaret=%rcx,>t62=%r9
- mov %rcx,%r9
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t63
- # asm 1: add <t63=int64#10,<squaret=int64#4
- # asm 2: add <t63=%r12,<squaret=%rcx
- add %r12,%rcx
- # qhasm: t62 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t62=int64#6
- # asm 2: and <squareredmask=%rdx,<t62=%r9
- and %rdx,%r9
- # qhasm: t63 = squaret
- # asm 1: mov <squaret=int64#4,>t63=int64#7
- # asm 2: mov <squaret=%rcx,>t63=%rax
- mov %rcx,%rax
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += t64
- # asm 1: add <t64=int64#12,<squaret=int64#4
- # asm 2: add <t64=%r14,<squaret=%rcx
- add %r14,%rcx
- # qhasm: t63 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t63=int64#7
- # asm 2: and <squareredmask=%rdx,<t63=%rax
- and %rdx,%rax
- # qhasm: t64 = squaret
- # asm 1: mov <squaret=int64#4,>t64=int64#8
- # asm 2: mov <squaret=%rcx,>t64=%r10
- mov %rcx,%r10
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret *= 19
- # asm 1: imulq $19,<squaret=int64#4,>squaret=int64#4
- # asm 2: imulq $19,<squaret=%rcx,>squaret=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: t60 += squaret
- # asm 1: add <squaret=int64#4,<t60=int64#2
- # asm 2: add <squaret=%rcx,<t60=%rsi
- add %rcx,%rsi
- # qhasm: t64 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<t64=int64#8
- # asm 2: and <squareredmask=%rdx,<t64=%r10
- and %rdx,%r10
- # qhasm: t60_stack = t60
- # asm 1: movq <t60=int64#2,>t60_stack=stack64#23
- # asm 2: movq <t60=%rsi,>t60_stack=176(%rsp)
- movq %rsi,176(%rsp)
- # qhasm: t61_stack = t61
- # asm 1: movq <t61=int64#5,>t61_stack=stack64#24
- # asm 2: movq <t61=%r8,>t61_stack=184(%rsp)
- movq %r8,184(%rsp)
- # qhasm: t62_stack = t62
- # asm 1: movq <t62=int64#6,>t62_stack=stack64#25
- # asm 2: movq <t62=%r9,>t62_stack=192(%rsp)
- movq %r9,192(%rsp)
- # qhasm: t63_stack = t63
- # asm 1: movq <t63=int64#7,>t63_stack=stack64#26
- # asm 2: movq <t63=%rax,>t63_stack=200(%rsp)
- movq %rax,200(%rsp)
- # qhasm: t64_stack = t64
- # asm 1: movq <t64=int64#8,>t64_stack=stack64#27
- # asm 2: movq <t64=%r10,>t64_stack=208(%rsp)
- movq %r10,208(%rsp)
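- # note: with t60..t64 saved, t5 = t6 - t7 is formed limb-wise below.
- # Adding the constants 2P0/2P1234 first (apparently twice the prime,
- # spread over the limbs as 2*(2^51 - 19) and 2*(2^51 - 1)) keeps every
- # limb nonnegative, so the subtraction cannot underflow.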
- # qhasm: t50 = t60
- # asm 1: mov <t60=int64#2,>t50=int64#2
- # asm 2: mov <t60=%rsi,>t50=%rsi
- mov %rsi,%rsi
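- # note: mov %rsi,%rsi is a self-move, i.e. a no-op; presumably qhasm's
- # register allocator assigned t50 and t60 the same register and emitted
- # the copy anyway.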
- # qhasm: t51 = t61
- # asm 1: mov <t61=int64#5,>t51=int64#3
- # asm 2: mov <t61=%r8,>t51=%rdx
- mov %r8,%rdx
- # qhasm: t52 = t62
- # asm 1: mov <t62=int64#6,>t52=int64#4
- # asm 2: mov <t62=%r9,>t52=%rcx
- mov %r9,%rcx
- # qhasm: t53 = t63
- # asm 1: mov <t63=int64#7,>t53=int64#5
- # asm 2: mov <t63=%rax,>t53=%r8
- mov %rax,%r8
- # qhasm: t54 = t64
- # asm 1: mov <t64=int64#8,>t54=int64#6
- # asm 2: mov <t64=%r10,>t54=%r9
- mov %r10,%r9
- # qhasm: t50 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P0
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P0,<t50=int64#2
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P0,<t50=%rsi
- add crypto_scalarmult_curve25519_amd64_51_2P0,%rsi
- # qhasm: t51 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t51=int64#3
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t51=%rdx
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%rdx
- # qhasm: t52 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t52=int64#4
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t52=%rcx
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%rcx
- # qhasm: t53 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t53=int64#5
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t53=%r8
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r8
- # qhasm: t54 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t54=int64#6
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t54=%r9
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r9
- # qhasm: t50 -= t70_stack
- # asm 1: subq <t70_stack=stack64#18,<t50=int64#2
- # asm 2: subq <t70_stack=136(%rsp),<t50=%rsi
- subq 136(%rsp),%rsi
- # qhasm: t51 -= t71_stack
- # asm 1: subq <t71_stack=stack64#19,<t51=int64#3
- # asm 2: subq <t71_stack=144(%rsp),<t51=%rdx
- subq 144(%rsp),%rdx
- # qhasm: t52 -= t72_stack
- # asm 1: subq <t72_stack=stack64#20,<t52=int64#4
- # asm 2: subq <t72_stack=152(%rsp),<t52=%rcx
- subq 152(%rsp),%rcx
- # qhasm: t53 -= t73_stack
- # asm 1: subq <t73_stack=stack64#21,<t53=int64#5
- # asm 2: subq <t73_stack=160(%rsp),<t53=%r8
- subq 160(%rsp),%r8
- # qhasm: t54 -= t74_stack
- # asm 1: subq <t74_stack=stack64#22,<t54=int64#6
- # asm 2: subq <t74_stack=168(%rsp),<t54=%r9
- subq 168(%rsp),%r9
- # qhasm: t50_stack = t50
- # asm 1: movq <t50=int64#2,>t50_stack=stack64#28
- # asm 2: movq <t50=%rsi,>t50_stack=216(%rsp)
- movq %rsi,216(%rsp)
- # qhasm: t51_stack = t51
- # asm 1: movq <t51=int64#3,>t51_stack=stack64#29
- # asm 2: movq <t51=%rdx,>t51_stack=224(%rsp)
- movq %rdx,224(%rsp)
- # qhasm: t52_stack = t52
- # asm 1: movq <t52=int64#4,>t52_stack=stack64#30
- # asm 2: movq <t52=%rcx,>t52_stack=232(%rsp)
- movq %rcx,232(%rsp)
- # qhasm: t53_stack = t53
- # asm 1: movq <t53=int64#5,>t53_stack=stack64#31
- # asm 2: movq <t53=%r8,>t53_stack=240(%rsp)
- movq %r8,240(%rsp)
- # qhasm: t54_stack = t54
- # asm 1: movq <t54=int64#6,>t54_stack=stack64#32
- # asm 2: movq <t54=%r9,>t54_stack=248(%rsp)
- movq %r9,248(%rsp)
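- # note: t50..t54 stored at 216..248(%rsp). The next block loads two field
- # elements from workp (offsets 120..152 and 160..192), computes their sum
- # into t30..t34 and their 2P-biased difference into t40..t44, and spills
- # both to the stack.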
- # qhasm: t30 = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>t30=int64#2
- # asm 2: movq 120(<workp=%rdi),>t30=%rsi
- movq 120(%rdi),%rsi
- # qhasm: t31 = *(uint64 *)(workp + 128)
- # asm 1: movq 128(<workp=int64#1),>t31=int64#3
- # asm 2: movq 128(<workp=%rdi),>t31=%rdx
- movq 128(%rdi),%rdx
- # qhasm: t32 = *(uint64 *)(workp + 136)
- # asm 1: movq 136(<workp=int64#1),>t32=int64#4
- # asm 2: movq 136(<workp=%rdi),>t32=%rcx
- movq 136(%rdi),%rcx
- # qhasm: t33 = *(uint64 *)(workp + 144)
- # asm 1: movq 144(<workp=int64#1),>t33=int64#5
- # asm 2: movq 144(<workp=%rdi),>t33=%r8
- movq 144(%rdi),%r8
- # qhasm: t34 = *(uint64 *)(workp + 152)
- # asm 1: movq 152(<workp=int64#1),>t34=int64#6
- # asm 2: movq 152(<workp=%rdi),>t34=%r9
- movq 152(%rdi),%r9
- # qhasm: t40 = t30
- # asm 1: mov <t30=int64#2,>t40=int64#7
- # asm 2: mov <t30=%rsi,>t40=%rax
- mov %rsi,%rax
- # qhasm: t41 = t31
- # asm 1: mov <t31=int64#3,>t41=int64#8
- # asm 2: mov <t31=%rdx,>t41=%r10
- mov %rdx,%r10
- # qhasm: t42 = t32
- # asm 1: mov <t32=int64#4,>t42=int64#9
- # asm 2: mov <t32=%rcx,>t42=%r11
- mov %rcx,%r11
- # qhasm: t43 = t33
- # asm 1: mov <t33=int64#5,>t43=int64#10
- # asm 2: mov <t33=%r8,>t43=%r12
- mov %r8,%r12
- # qhasm: t44 = t34
- # asm 1: mov <t34=int64#6,>t44=int64#11
- # asm 2: mov <t34=%r9,>t44=%r13
- mov %r9,%r13
- # qhasm: t40 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P0
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P0,<t40=int64#7
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P0,<t40=%rax
- add crypto_scalarmult_curve25519_amd64_51_2P0,%rax
- # qhasm: t41 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t41=int64#8
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t41=%r10
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r10
- # qhasm: t42 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t42=int64#9
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t42=%r11
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r11
- # qhasm: t43 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t43=int64#10
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t43=%r12
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r12
- # qhasm: t44 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t44=int64#11
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<t44=%r13
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r13
- # qhasm: t30 += *(uint64 *)(workp + 160)
- # asm 1: addq 160(<workp=int64#1),<t30=int64#2
- # asm 2: addq 160(<workp=%rdi),<t30=%rsi
- addq 160(%rdi),%rsi
- # qhasm: t31 += *(uint64 *)(workp + 168)
- # asm 1: addq 168(<workp=int64#1),<t31=int64#3
- # asm 2: addq 168(<workp=%rdi),<t31=%rdx
- addq 168(%rdi),%rdx
- # qhasm: t32 += *(uint64 *)(workp + 176)
- # asm 1: addq 176(<workp=int64#1),<t32=int64#4
- # asm 2: addq 176(<workp=%rdi),<t32=%rcx
- addq 176(%rdi),%rcx
- # qhasm: t33 += *(uint64 *)(workp + 184)
- # asm 1: addq 184(<workp=int64#1),<t33=int64#5
- # asm 2: addq 184(<workp=%rdi),<t33=%r8
- addq 184(%rdi),%r8
- # qhasm: t34 += *(uint64 *)(workp + 192)
- # asm 1: addq 192(<workp=int64#1),<t34=int64#6
- # asm 2: addq 192(<workp=%rdi),<t34=%r9
- addq 192(%rdi),%r9
- # qhasm: t40 -= *(uint64 *)(workp + 160)
- # asm 1: subq 160(<workp=int64#1),<t40=int64#7
- # asm 2: subq 160(<workp=%rdi),<t40=%rax
- subq 160(%rdi),%rax
- # qhasm: t41 -= *(uint64 *)(workp + 168)
- # asm 1: subq 168(<workp=int64#1),<t41=int64#8
- # asm 2: subq 168(<workp=%rdi),<t41=%r10
- subq 168(%rdi),%r10
- # qhasm: t42 -= *(uint64 *)(workp + 176)
- # asm 1: subq 176(<workp=int64#1),<t42=int64#9
- # asm 2: subq 176(<workp=%rdi),<t42=%r11
- subq 176(%rdi),%r11
- # qhasm: t43 -= *(uint64 *)(workp + 184)
- # asm 1: subq 184(<workp=int64#1),<t43=int64#10
- # asm 2: subq 184(<workp=%rdi),<t43=%r12
- subq 184(%rdi),%r12
- # qhasm: t44 -= *(uint64 *)(workp + 192)
- # asm 1: subq 192(<workp=int64#1),<t44=int64#11
- # asm 2: subq 192(<workp=%rdi),<t44=%r13
- subq 192(%rdi),%r13
- # qhasm: t30_stack = t30
- # asm 1: movq <t30=int64#2,>t30_stack=stack64#33
- # asm 2: movq <t30=%rsi,>t30_stack=256(%rsp)
- movq %rsi,256(%rsp)
- # qhasm: t31_stack = t31
- # asm 1: movq <t31=int64#3,>t31_stack=stack64#34
- # asm 2: movq <t31=%rdx,>t31_stack=264(%rsp)
- movq %rdx,264(%rsp)
- # qhasm: t32_stack = t32
- # asm 1: movq <t32=int64#4,>t32_stack=stack64#35
- # asm 2: movq <t32=%rcx,>t32_stack=272(%rsp)
- movq %rcx,272(%rsp)
- # qhasm: t33_stack = t33
- # asm 1: movq <t33=int64#5,>t33_stack=stack64#36
- # asm 2: movq <t33=%r8,>t33_stack=280(%rsp)
- movq %r8,280(%rsp)
- # qhasm: t34_stack = t34
- # asm 1: movq <t34=int64#6,>t34_stack=stack64#37
- # asm 2: movq <t34=%r9,>t34_stack=288(%rsp)
- movq %r9,288(%rsp)
- # qhasm: t40_stack = t40
- # asm 1: movq <t40=int64#7,>t40_stack=stack64#38
- # asm 2: movq <t40=%rax,>t40_stack=296(%rsp)
- movq %rax,296(%rsp)
- # qhasm: t41_stack = t41
- # asm 1: movq <t41=int64#8,>t41_stack=stack64#39
- # asm 2: movq <t41=%r10,>t41_stack=304(%rsp)
- movq %r10,304(%rsp)
- # qhasm: t42_stack = t42
- # asm 1: movq <t42=int64#9,>t42_stack=stack64#40
- # asm 2: movq <t42=%r11,>t42_stack=312(%rsp)
- movq %r11,312(%rsp)
- # qhasm: t43_stack = t43
- # asm 1: movq <t43=int64#10,>t43_stack=stack64#41
- # asm 2: movq <t43=%r12,>t43_stack=320(%rsp)
- movq %r12,320(%rsp)
- # qhasm: t44_stack = t44
- # asm 1: movq <t44=int64#11,>t44_stack=stack64#42
- # asm 2: movq <t44=%r13,>t44_stack=328(%rsp)
- movq %r13,328(%rsp)
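- # note: the block below is a full 5x5 schoolbook multiplication of
- # t30..t34 by the element in t20_stack..t24_stack. t33*19 and t34*19 are
- # computed once up front (mulx319/mulx419) because each is reused for
- # several wrapped partial products.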
- # qhasm: mulrax = t33_stack
- # asm 1: movq <t33_stack=stack64#36,>mulrax=int64#2
- # asm 2: movq <t33_stack=280(%rsp),>mulrax=%rsi
- movq 280(%rsp),%rsi
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
- imulq $19,%rsi,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#43
- # asm 2: movq <mulrax=%rax,>mulx319_stack=336(%rsp)
- movq %rax,336(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: t90 = mulrax
- # asm 1: mov <mulrax=int64#7,>t90=int64#2
- # asm 2: mov <mulrax=%rax,>t90=%rsi
- mov %rax,%rsi
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
- # asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
- mov %rdx,%rcx
- # qhasm: mulrax = t34_stack
- # asm 1: movq <t34_stack=stack64#37,>mulrax=int64#3
- # asm 2: movq <t34_stack=288(%rsp),>mulrax=%rdx
- movq 288(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#44
- # asm 2: movq <mulrax=%rax,>mulx419_stack=344(%rsp)
- movq %rax,344(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: carry? t90 += mulrax
- # asm 1: add <mulrax=int64#7,<t90=int64#2
- # asm 2: add <mulrax=%rax,<t90=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t30_stack
- # asm 1: movq <t30_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <t30_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: carry? t90 += mulrax
- # asm 1: add <mulrax=int64#7,<t90=int64#2
- # asm 2: add <mulrax=%rax,<t90=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t30_stack
- # asm 1: movq <t30_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <t30_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: t91 = mulrax
- # asm 1: mov <mulrax=int64#7,>t91=int64#5
- # asm 2: mov <mulrax=%rax,>t91=%r8
- mov %rax,%r8
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r9
- mov %rdx,%r9
- # qhasm: mulrax = t30_stack
- # asm 1: movq <t30_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <t30_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: t92 = mulrax
- # asm 1: mov <mulrax=int64#7,>t92=int64#8
- # asm 2: mov <mulrax=%rax,>t92=%r10
- mov %rax,%r10
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r11
- mov %rdx,%r11
- # qhasm: mulrax = t30_stack
- # asm 1: movq <t30_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <t30_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: t93 = mulrax
- # asm 1: mov <mulrax=int64#7,>t93=int64#10
- # asm 2: mov <mulrax=%rax,>t93=%r12
- mov %rax,%r12
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r13
- mov %rdx,%r13
- # qhasm: mulrax = t30_stack
- # asm 1: movq <t30_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <t30_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: t94 = mulrax
- # asm 1: mov <mulrax=int64#7,>t94=int64#12
- # asm 2: mov <mulrax=%rax,>t94=%r14
- mov %rax,%r14
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
- # asm 2: mov <mulrdx=%rdx,>mulr41=%r15
- mov %rdx,%r15
- # qhasm: mulrax = t31_stack
- # asm 1: movq <t31_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <t31_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: carry? t91 += mulrax
- # asm 1: add <mulrax=int64#7,<t91=int64#5
- # asm 2: add <mulrax=%rax,<t91=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t31_stack
- # asm 1: movq <t31_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <t31_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: carry? t92 += mulrax
- # asm 1: add <mulrax=int64#7,<t92=int64#8
- # asm 2: add <mulrax=%rax,<t92=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t31_stack
- # asm 1: movq <t31_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <t31_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: carry? t93 += mulrax
- # asm 1: add <mulrax=int64#7,<t93=int64#10
- # asm 2: add <mulrax=%rax,<t93=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t31_stack
- # asm 1: movq <t31_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <t31_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t94 += mulrax
- # asm 1: add <mulrax=int64#7,<t94=int64#12
- # asm 2: add <mulrax=%rax,<t94=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t31_stack
- # asm 1: movq <t31_stack=stack64#34,>mulrax=int64#3
- # asm 2: movq <t31_stack=264(%rsp),>mulrax=%rdx
- movq 264(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t90 += mulrax
- # asm 1: add <mulrax=int64#7,<t90=int64#2
- # asm 2: add <mulrax=%rax,<t90=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t32_stack
- # asm 1: movq <t32_stack=stack64#35,>mulrax=int64#7
- # asm 2: movq <t32_stack=272(%rsp),>mulrax=%rax
- movq 272(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: carry? t92 += mulrax
- # asm 1: add <mulrax=int64#7,<t92=int64#8
- # asm 2: add <mulrax=%rax,<t92=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t32_stack
- # asm 1: movq <t32_stack=stack64#35,>mulrax=int64#7
- # asm 2: movq <t32_stack=272(%rsp),>mulrax=%rax
- movq 272(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: carry? t93 += mulrax
- # asm 1: add <mulrax=int64#7,<t93=int64#10
- # asm 2: add <mulrax=%rax,<t93=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t32_stack
- # asm 1: movq <t32_stack=stack64#35,>mulrax=int64#7
- # asm 2: movq <t32_stack=272(%rsp),>mulrax=%rax
- movq 272(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: carry? t94 += mulrax
- # asm 1: add <mulrax=int64#7,<t94=int64#12
- # asm 2: add <mulrax=%rax,<t94=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t32_stack
- # asm 1: movq <t32_stack=stack64#35,>mulrax=int64#3
- # asm 2: movq <t32_stack=272(%rsp),>mulrax=%rdx
- movq 272(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t90 += mulrax
- # asm 1: add <mulrax=int64#7,<t90=int64#2
- # asm 2: add <mulrax=%rax,<t90=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t32_stack
- # asm 1: movq <t32_stack=stack64#35,>mulrax=int64#3
- # asm 2: movq <t32_stack=272(%rsp),>mulrax=%rdx
- movq 272(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t91 += mulrax
- # asm 1: add <mulrax=int64#7,<t91=int64#5
- # asm 2: add <mulrax=%rax,<t91=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t33_stack
- # asm 1: movq <t33_stack=stack64#36,>mulrax=int64#7
- # asm 2: movq <t33_stack=280(%rsp),>mulrax=%rax
- movq 280(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: carry? t93 += mulrax
- # asm 1: add <mulrax=int64#7,<t93=int64#10
- # asm 2: add <mulrax=%rax,<t93=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t33_stack
- # asm 1: movq <t33_stack=stack64#36,>mulrax=int64#7
- # asm 2: movq <t33_stack=280(%rsp),>mulrax=%rax
- movq 280(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack
- # asm 1: mulq <t21_stack=stack64#14
- # asm 2: mulq <t21_stack=104(%rsp)
- mulq 104(%rsp)
- # qhasm: carry? t94 += mulrax
- # asm 1: add <mulrax=int64#7,<t94=int64#12
- # asm 2: add <mulrax=%rax,<t94=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#43,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=336(%rsp),>mulrax=%rax
- movq 336(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t91 += mulrax
- # asm 1: add <mulrax=int64#7,<t91=int64#5
- # asm 2: add <mulrax=%rax,<t91=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#43,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=336(%rsp),>mulrax=%rax
- movq 336(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t92 += mulrax
- # asm 1: add <mulrax=int64#7,<t92=int64#8
- # asm 2: add <mulrax=%rax,<t92=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t34_stack
- # asm 1: movq <t34_stack=stack64#37,>mulrax=int64#7
- # asm 2: movq <t34_stack=288(%rsp),>mulrax=%rax
- movq 288(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack
- # asm 1: mulq <t20_stack=stack64#13
- # asm 2: mulq <t20_stack=96(%rsp)
- mulq 96(%rsp)
- # qhasm: carry? t94 += mulrax
- # asm 1: add <mulrax=int64#7,<t94=int64#12
- # asm 2: add <mulrax=%rax,<t94=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#44,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=344(%rsp),>mulrax=%rax
- movq 344(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack
- # asm 1: mulq <t22_stack=stack64#15
- # asm 2: mulq <t22_stack=112(%rsp)
- mulq 112(%rsp)
- # qhasm: carry? t91 += mulrax
- # asm 1: add <mulrax=int64#7,<t91=int64#5
- # asm 2: add <mulrax=%rax,<t91=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#44,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=344(%rsp),>mulrax=%rax
- movq 344(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack
- # asm 1: mulq <t23_stack=stack64#16
- # asm 2: mulq <t23_stack=120(%rsp)
- mulq 120(%rsp)
- # qhasm: carry? t92 += mulrax
- # asm 1: add <mulrax=int64#7,<t92=int64#8
- # asm 2: add <mulrax=%rax,<t92=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#44,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=344(%rsp),>mulrax=%rax
- movq 344(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack
- # asm 1: mulq <t24_stack=stack64#17
- # asm 2: mulq <t24_stack=128(%rsp)
- mulq 128(%rsp)
- # qhasm: carry? t93 += mulrax
- # asm 1: add <mulrax=int64#7,<t93=int64#10
- # asm 2: add <mulrax=%rax,<t93=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
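- # note: same reduction pattern as after the square above: shld $13 pulls
- # out floor(column / 2^51), the low 51 bits are masked with REDMASK51,
- # carries chain upward, and the top carry folds back into limb 0 times 19.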
- # qhasm: mulredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: mulr01 = (mulr01.t90) << 13
- # asm 1: shld $13,<t90=int64#2,<mulr01=int64#4
- # asm 2: shld $13,<t90=%rsi,<mulr01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: t90 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t90=int64#2
- # asm 2: and <mulredmask=%rdx,<t90=%rsi
- and %rdx,%rsi
- # qhasm: mulr11 = (mulr11.t91) << 13
- # asm 1: shld $13,<t91=int64#5,<mulr11=int64#6
- # asm 2: shld $13,<t91=%r8,<mulr11=%r9
- shld $13,%r8,%r9
- # qhasm: t91 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t91=int64#5
- # asm 2: and <mulredmask=%rdx,<t91=%r8
- and %rdx,%r8
- # qhasm: t91 += mulr01
- # asm 1: add <mulr01=int64#4,<t91=int64#5
- # asm 2: add <mulr01=%rcx,<t91=%r8
- add %rcx,%r8
- # qhasm: mulr21 = (mulr21.t92) << 13
- # asm 1: shld $13,<t92=int64#8,<mulr21=int64#9
- # asm 2: shld $13,<t92=%r10,<mulr21=%r11
- shld $13,%r10,%r11
- # qhasm: t92 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t92=int64#8
- # asm 2: and <mulredmask=%rdx,<t92=%r10
- and %rdx,%r10
- # qhasm: t92 += mulr11
- # asm 1: add <mulr11=int64#6,<t92=int64#8
- # asm 2: add <mulr11=%r9,<t92=%r10
- add %r9,%r10
- # qhasm: mulr31 = (mulr31.t93) << 13
- # asm 1: shld $13,<t93=int64#10,<mulr31=int64#11
- # asm 2: shld $13,<t93=%r12,<mulr31=%r13
- shld $13,%r12,%r13
- # qhasm: t93 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t93=int64#10
- # asm 2: and <mulredmask=%rdx,<t93=%r12
- and %rdx,%r12
- # qhasm: t93 += mulr21
- # asm 1: add <mulr21=int64#9,<t93=int64#10
- # asm 2: add <mulr21=%r11,<t93=%r12
- add %r11,%r12
- # qhasm: mulr41 = (mulr41.t94) << 13
- # asm 1: shld $13,<t94=int64#12,<mulr41=int64#13
- # asm 2: shld $13,<t94=%r14,<mulr41=%r15
- shld $13,%r14,%r15
- # qhasm: t94 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t94=int64#12
- # asm 2: and <mulredmask=%rdx,<t94=%r14
- and %rdx,%r14
- # qhasm: t94 += mulr31
- # asm 1: add <mulr31=int64#11,<t94=int64#12
- # asm 2: add <mulr31=%r13,<t94=%r14
- add %r13,%r14
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
- # asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: t90 += mulr41
- # asm 1: add <mulr41=int64#4,<t90=int64#2
- # asm 2: add <mulr41=%rcx,<t90=%rsi
- add %rcx,%rsi
- # qhasm: mult = t90
- # asm 1: mov <t90=int64#2,>mult=int64#4
- # asm 2: mov <t90=%rsi,>mult=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: mult += t91
- # asm 1: add <t91=int64#5,<mult=int64#4
- # asm 2: add <t91=%r8,<mult=%rcx
- add %r8,%rcx
- # qhasm: t91 = mult
- # asm 1: mov <mult=int64#4,>t91=int64#5
- # asm 2: mov <mult=%rcx,>t91=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t90 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t90=int64#2
- # asm 2: and <mulredmask=%rdx,<t90=%rsi
- and %rdx,%rsi
- # qhasm: mult += t92
- # asm 1: add <t92=int64#8,<mult=int64#4
- # asm 2: add <t92=%r10,<mult=%rcx
- add %r10,%rcx
- # qhasm: t92 = mult
- # asm 1: mov <mult=int64#4,>t92=int64#6
- # asm 2: mov <mult=%rcx,>t92=%r9
- mov %rcx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t91 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t91=int64#5
- # asm 2: and <mulredmask=%rdx,<t91=%r8
- and %rdx,%r8
- # qhasm: mult += t93
- # asm 1: add <t93=int64#10,<mult=int64#4
- # asm 2: add <t93=%r12,<mult=%rcx
- add %r12,%rcx
- # qhasm: t93 = mult
- # asm 1: mov <mult=int64#4,>t93=int64#7
- # asm 2: mov <mult=%rcx,>t93=%rax
- mov %rcx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t92 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t92=int64#6
- # asm 2: and <mulredmask=%rdx,<t92=%r9
- and %rdx,%r9
- # qhasm: mult += t94
- # asm 1: add <t94=int64#12,<mult=int64#4
- # asm 2: add <t94=%r14,<mult=%rcx
- add %r14,%rcx
- # qhasm: t94 = mult
- # asm 1: mov <mult=int64#4,>t94=int64#8
- # asm 2: mov <mult=%rcx,>t94=%r10
- mov %rcx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t93 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t93=int64#7
- # asm 2: and <mulredmask=%rdx,<t93=%rax
- and %rdx,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#4,>mult=int64#4
- # asm 2: imulq $19,<mult=%rcx,>mult=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: t90 += mult
- # asm 1: add <mult=int64#4,<t90=int64#2
- # asm 2: add <mult=%rcx,<t90=%rsi
- add %rcx,%rsi
- # qhasm: t94 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t94=int64#8
- # asm 2: and <mulredmask=%rdx,<t94=%r10
- and %rdx,%r10
- # qhasm: t90_stack = t90
- # asm 1: movq <t90=int64#2,>t90_stack=stack64#13
- # asm 2: movq <t90=%rsi,>t90_stack=96(%rsp)
- movq %rsi,96(%rsp)
- # qhasm: t91_stack = t91
- # asm 1: movq <t91=int64#5,>t91_stack=stack64#14
- # asm 2: movq <t91=%r8,>t91_stack=104(%rsp)
- movq %r8,104(%rsp)
- # qhasm: t92_stack = t92
- # asm 1: movq <t92=int64#6,>t92_stack=stack64#15
- # asm 2: movq <t92=%r9,>t92_stack=112(%rsp)
- movq %r9,112(%rsp)
- # qhasm: t93_stack = t93
- # asm 1: movq <t93=int64#7,>t93_stack=stack64#16
- # asm 2: movq <t93=%rax,>t93_stack=120(%rsp)
- movq %rax,120(%rsp)
- # qhasm: t94_stack = t94
- # asm 1: movq <t94=int64#8,>t94_stack=stack64#17
- # asm 2: movq <t94=%r10,>t94_stack=128(%rsp)
- movq %r10,128(%rsp)
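- # note: t90..t94 overwrite the stack slots that previously held
- # t20..t24 (96..128(%rsp)); those limbs are dead at this point, so the
- # space is recycled. The block below begins the next 5x5 product,
- # multiplying t40..t44 by the limbs in t10_stack..t14_stack.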
- # qhasm: mulrax = t43_stack
- # asm 1: movq <t43_stack=stack64#41,>mulrax=int64#2
- # asm 2: movq <t43_stack=320(%rsp),>mulrax=%rsi
- movq 320(%rsp),%rsi
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
- imulq $19,%rsi,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#33
- # asm 2: movq <mulrax=%rax,>mulx319_stack=256(%rsp)
- movq %rax,256(%rsp)
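- # note: mulx319 (and mulx419 just below) likewise recycle the old
- # t30_stack/t31_stack slots at 256/264(%rsp), which were consumed by the
- # previous multiplication.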
- # qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: t80 = mulrax
- # asm 1: mov <mulrax=int64#7,>t80=int64#2
- # asm 2: mov <mulrax=%rax,>t80=%rsi
- mov %rax,%rsi
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
- # asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
- mov %rdx,%rcx
- # qhasm: mulrax = t44_stack
- # asm 1: movq <t44_stack=stack64#42,>mulrax=int64#3
- # asm 2: movq <t44_stack=328(%rsp),>mulrax=%rdx
- movq 328(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#34
- # asm 2: movq <mulrax=%rax,>mulx419_stack=264(%rsp)
- movq %rax,264(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: carry? t80 += mulrax
- # asm 1: add <mulrax=int64#7,<t80=int64#2
- # asm 2: add <mulrax=%rax,<t80=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t40_stack
- # asm 1: movq <t40_stack=stack64#38,>mulrax=int64#7
- # asm 2: movq <t40_stack=296(%rsp),>mulrax=%rax
- movq 296(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: carry? t80 += mulrax
- # asm 1: add <mulrax=int64#7,<t80=int64#2
- # asm 2: add <mulrax=%rax,<t80=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t40_stack
- # asm 1: movq <t40_stack=stack64#38,>mulrax=int64#7
- # asm 2: movq <t40_stack=296(%rsp),>mulrax=%rax
- movq 296(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: t81 = mulrax
- # asm 1: mov <mulrax=int64#7,>t81=int64#5
- # asm 2: mov <mulrax=%rax,>t81=%r8
- mov %rax,%r8
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r9
- mov %rdx,%r9
- # qhasm: mulrax = t40_stack
- # asm 1: movq <t40_stack=stack64#38,>mulrax=int64#7
- # asm 2: movq <t40_stack=296(%rsp),>mulrax=%rax
- movq 296(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: t82 = mulrax
- # asm 1: mov <mulrax=int64#7,>t82=int64#8
- # asm 2: mov <mulrax=%rax,>t82=%r10
- mov %rax,%r10
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r11
- mov %rdx,%r11
- # qhasm: mulrax = t40_stack
- # asm 1: movq <t40_stack=stack64#38,>mulrax=int64#7
- # asm 2: movq <t40_stack=296(%rsp),>mulrax=%rax
- movq 296(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: t83 = mulrax
- # asm 1: mov <mulrax=int64#7,>t83=int64#10
- # asm 2: mov <mulrax=%rax,>t83=%r12
- mov %rax,%r12
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r13
- mov %rdx,%r13
- # qhasm: mulrax = t40_stack
- # asm 1: movq <t40_stack=stack64#38,>mulrax=int64#7
- # asm 2: movq <t40_stack=296(%rsp),>mulrax=%rax
- movq 296(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: t84 = mulrax
- # asm 1: mov <mulrax=int64#7,>t84=int64#12
- # asm 2: mov <mulrax=%rax,>t84=%r14
- mov %rax,%r14
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
- # asm 2: mov <mulrdx=%rdx,>mulr41=%r15
- mov %rdx,%r15
- # qhasm: mulrax = t41_stack
- # asm 1: movq <t41_stack=stack64#39,>mulrax=int64#7
- # asm 2: movq <t41_stack=304(%rsp),>mulrax=%rax
- movq 304(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: carry? t81 += mulrax
- # asm 1: add <mulrax=int64#7,<t81=int64#5
- # asm 2: add <mulrax=%rax,<t81=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t41_stack
- # asm 1: movq <t41_stack=stack64#39,>mulrax=int64#7
- # asm 2: movq <t41_stack=304(%rsp),>mulrax=%rax
- movq 304(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: carry? t82 += mulrax
- # asm 1: add <mulrax=int64#7,<t82=int64#8
- # asm 2: add <mulrax=%rax,<t82=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t41_stack
- # asm 1: movq <t41_stack=stack64#39,>mulrax=int64#7
- # asm 2: movq <t41_stack=304(%rsp),>mulrax=%rax
- movq 304(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: carry? t83 += mulrax
- # asm 1: add <mulrax=int64#7,<t83=int64#10
- # asm 2: add <mulrax=%rax,<t83=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t41_stack
- # asm 1: movq <t41_stack=stack64#39,>mulrax=int64#7
- # asm 2: movq <t41_stack=304(%rsp),>mulrax=%rax
- movq 304(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t84 += mulrax
- # asm 1: add <mulrax=int64#7,<t84=int64#12
- # asm 2: add <mulrax=%rax,<t84=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t41_stack
- # asm 1: movq <t41_stack=stack64#39,>mulrax=int64#3
- # asm 2: movq <t41_stack=304(%rsp),>mulrax=%rdx
- movq 304(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t80 += mulrax
- # asm 1: add <mulrax=int64#7,<t80=int64#2
- # asm 2: add <mulrax=%rax,<t80=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t42_stack
- # asm 1: movq <t42_stack=stack64#40,>mulrax=int64#7
- # asm 2: movq <t42_stack=312(%rsp),>mulrax=%rax
- movq 312(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: carry? t82 += mulrax
- # asm 1: add <mulrax=int64#7,<t82=int64#8
- # asm 2: add <mulrax=%rax,<t82=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t42_stack
- # asm 1: movq <t42_stack=stack64#40,>mulrax=int64#7
- # asm 2: movq <t42_stack=312(%rsp),>mulrax=%rax
- movq 312(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: carry? t83 += mulrax
- # asm 1: add <mulrax=int64#7,<t83=int64#10
- # asm 2: add <mulrax=%rax,<t83=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t42_stack
- # asm 1: movq <t42_stack=stack64#40,>mulrax=int64#7
- # asm 2: movq <t42_stack=312(%rsp),>mulrax=%rax
- movq 312(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: carry? t84 += mulrax
- # asm 1: add <mulrax=int64#7,<t84=int64#12
- # asm 2: add <mulrax=%rax,<t84=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t42_stack
- # asm 1: movq <t42_stack=stack64#40,>mulrax=int64#3
- # asm 2: movq <t42_stack=312(%rsp),>mulrax=%rdx
- movq 312(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t80 += mulrax
- # asm 1: add <mulrax=int64#7,<t80=int64#2
- # asm 2: add <mulrax=%rax,<t80=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t42_stack
- # asm 1: movq <t42_stack=stack64#40,>mulrax=int64#3
- # asm 2: movq <t42_stack=312(%rsp),>mulrax=%rdx
- movq 312(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t81 += mulrax
- # asm 1: add <mulrax=int64#7,<t81=int64#5
- # asm 2: add <mulrax=%rax,<t81=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t43_stack
- # asm 1: movq <t43_stack=stack64#41,>mulrax=int64#7
- # asm 2: movq <t43_stack=320(%rsp),>mulrax=%rax
- movq 320(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: carry? t83 += mulrax
- # asm 1: add <mulrax=int64#7,<t83=int64#10
- # asm 2: add <mulrax=%rax,<t83=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t43_stack
- # asm 1: movq <t43_stack=stack64#41,>mulrax=int64#7
- # asm 2: movq <t43_stack=320(%rsp),>mulrax=%rax
- movq 320(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack
- # asm 1: mulq <t11_stack=stack64#9
- # asm 2: mulq <t11_stack=64(%rsp)
- mulq 64(%rsp)
- # qhasm: carry? t84 += mulrax
- # asm 1: add <mulrax=int64#7,<t84=int64#12
- # asm 2: add <mulrax=%rax,<t84=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t81 += mulrax
- # asm 1: add <mulrax=int64#7,<t81=int64#5
- # asm 2: add <mulrax=%rax,<t81=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#33,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=256(%rsp),>mulrax=%rax
- movq 256(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t82 += mulrax
- # asm 1: add <mulrax=int64#7,<t82=int64#8
- # asm 2: add <mulrax=%rax,<t82=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t44_stack
- # asm 1: movq <t44_stack=stack64#42,>mulrax=int64#7
- # asm 2: movq <t44_stack=328(%rsp),>mulrax=%rax
- movq 328(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack
- # asm 1: mulq <t10_stack=stack64#8
- # asm 2: mulq <t10_stack=56(%rsp)
- mulq 56(%rsp)
- # qhasm: carry? t84 += mulrax
- # asm 1: add <mulrax=int64#7,<t84=int64#12
- # asm 2: add <mulrax=%rax,<t84=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack
- # asm 1: mulq <t12_stack=stack64#10
- # asm 2: mulq <t12_stack=72(%rsp)
- mulq 72(%rsp)
- # qhasm: carry? t81 += mulrax
- # asm 1: add <mulrax=int64#7,<t81=int64#5
- # asm 2: add <mulrax=%rax,<t81=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack
- # asm 1: mulq <t13_stack=stack64#11
- # asm 2: mulq <t13_stack=80(%rsp)
- mulq 80(%rsp)
- # qhasm: carry? t82 += mulrax
- # asm 1: add <mulrax=int64#7,<t82=int64#8
- # asm 2: add <mulrax=%rax,<t82=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#34,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=264(%rsp),>mulrax=%rax
- movq 264(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack
- # asm 1: mulq <t14_stack=stack64#12
- # asm 2: mulq <t14_stack=88(%rsp)
- mulq 88(%rsp)
- # qhasm: carry? t83 += mulrax
- # asm 1: add <mulrax=int64#7,<t83=int64#10
- # asm 2: add <mulrax=%rax,<t83=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
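- # All partial products of the 5x5-limb schoolbook multiplication are now
- # accumulated: the five column sums of t8 = t4*t1 sit in the 128-bit
- # pairs (mulr01:t80) .. (mulr41:t84). The block below reduces them to
- # radix-2^51: shld $13 computes high = (high << 13) | (low >> 51),
- # REDMASK51 (= 2^51 - 1) masks each limb to 51 bits, each folded high
- # word is added into the next limb, the overflow above limb 4 is
- # multiplied by 19 and added back into limb 0, and a final carry pass
- # (mult) propagates the remainder through the limbs.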
- # qhasm: mulredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: mulr01 = (mulr01.t80) << 13
- # asm 1: shld $13,<t80=int64#2,<mulr01=int64#4
- # asm 2: shld $13,<t80=%rsi,<mulr01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: t80 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t80=int64#2
- # asm 2: and <mulredmask=%rdx,<t80=%rsi
- and %rdx,%rsi
- # qhasm: mulr11 = (mulr11.t81) << 13
- # asm 1: shld $13,<t81=int64#5,<mulr11=int64#6
- # asm 2: shld $13,<t81=%r8,<mulr11=%r9
- shld $13,%r8,%r9
- # qhasm: t81 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t81=int64#5
- # asm 2: and <mulredmask=%rdx,<t81=%r8
- and %rdx,%r8
- # qhasm: t81 += mulr01
- # asm 1: add <mulr01=int64#4,<t81=int64#5
- # asm 2: add <mulr01=%rcx,<t81=%r8
- add %rcx,%r8
- # qhasm: mulr21 = (mulr21.t82) << 13
- # asm 1: shld $13,<t82=int64#8,<mulr21=int64#9
- # asm 2: shld $13,<t82=%r10,<mulr21=%r11
- shld $13,%r10,%r11
- # qhasm: t82 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t82=int64#8
- # asm 2: and <mulredmask=%rdx,<t82=%r10
- and %rdx,%r10
- # qhasm: t82 += mulr11
- # asm 1: add <mulr11=int64#6,<t82=int64#8
- # asm 2: add <mulr11=%r9,<t82=%r10
- add %r9,%r10
- # qhasm: mulr31 = (mulr31.t83) << 13
- # asm 1: shld $13,<t83=int64#10,<mulr31=int64#11
- # asm 2: shld $13,<t83=%r12,<mulr31=%r13
- shld $13,%r12,%r13
- # qhasm: t83 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t83=int64#10
- # asm 2: and <mulredmask=%rdx,<t83=%r12
- and %rdx,%r12
- # qhasm: t83 += mulr21
- # asm 1: add <mulr21=int64#9,<t83=int64#10
- # asm 2: add <mulr21=%r11,<t83=%r12
- add %r11,%r12
- # qhasm: mulr41 = (mulr41.t84) << 13
- # asm 1: shld $13,<t84=int64#12,<mulr41=int64#13
- # asm 2: shld $13,<t84=%r14,<mulr41=%r15
- shld $13,%r14,%r15
- # qhasm: t84 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t84=int64#12
- # asm 2: and <mulredmask=%rdx,<t84=%r14
- and %rdx,%r14
- # qhasm: t84 += mulr31
- # asm 1: add <mulr31=int64#11,<t84=int64#12
- # asm 2: add <mulr31=%r13,<t84=%r14
- add %r13,%r14
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
- # asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: t80 += mulr41
- # asm 1: add <mulr41=int64#4,<t80=int64#2
- # asm 2: add <mulr41=%rcx,<t80=%rsi
- add %rcx,%rsi
- # qhasm: mult = t80
- # asm 1: mov <t80=int64#2,>mult=int64#4
- # asm 2: mov <t80=%rsi,>mult=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: mult += t81
- # asm 1: add <t81=int64#5,<mult=int64#4
- # asm 2: add <t81=%r8,<mult=%rcx
- add %r8,%rcx
- # qhasm: t81 = mult
- # asm 1: mov <mult=int64#4,>t81=int64#5
- # asm 2: mov <mult=%rcx,>t81=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t80 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t80=int64#2
- # asm 2: and <mulredmask=%rdx,<t80=%rsi
- and %rdx,%rsi
- # qhasm: mult += t82
- # asm 1: add <t82=int64#8,<mult=int64#4
- # asm 2: add <t82=%r10,<mult=%rcx
- add %r10,%rcx
- # qhasm: t82 = mult
- # asm 1: mov <mult=int64#4,>t82=int64#6
- # asm 2: mov <mult=%rcx,>t82=%r9
- mov %rcx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t81 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t81=int64#5
- # asm 2: and <mulredmask=%rdx,<t81=%r8
- and %rdx,%r8
- # qhasm: mult += t83
- # asm 1: add <t83=int64#10,<mult=int64#4
- # asm 2: add <t83=%r12,<mult=%rcx
- add %r12,%rcx
- # qhasm: t83 = mult
- # asm 1: mov <mult=int64#4,>t83=int64#7
- # asm 2: mov <mult=%rcx,>t83=%rax
- mov %rcx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t82 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t82=int64#6
- # asm 2: and <mulredmask=%rdx,<t82=%r9
- and %rdx,%r9
- # qhasm: mult += t84
- # asm 1: add <t84=int64#12,<mult=int64#4
- # asm 2: add <t84=%r14,<mult=%rcx
- add %r14,%rcx
- # qhasm: t84 = mult
- # asm 1: mov <mult=int64#4,>t84=int64#8
- # asm 2: mov <mult=%rcx,>t84=%r10
- mov %rcx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: t83 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t83=int64#7
- # asm 2: and <mulredmask=%rdx,<t83=%rax
- and %rdx,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#4,>mult=int64#4
- # asm 2: imulq $19,<mult=%rcx,>mult=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: t80 += mult
- # asm 1: add <mult=int64#4,<t80=int64#2
- # asm 2: add <mult=%rcx,<t80=%rsi
- add %rcx,%rsi
- # qhasm: t84 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<t84=int64#8
- # asm 2: and <mulredmask=%rdx,<t84=%r10
- and %rdx,%r10
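- # t80..t84 now hold t8 = t4*t1 as five 51-bit limbs (limb 0 may exceed
- # 51 bits slightly from the final *19 fold). Copy them into zq0..zq4 and
- # add the limbwise constants 2P0/2P1234, i.e. 2*p in radix-2^51, so the
- # upcoming subtraction of t9 cannot underflow.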
- # qhasm: zq0 = t80
- # asm 1: mov <t80=int64#2,>zq0=int64#3
- # asm 2: mov <t80=%rsi,>zq0=%rdx
- mov %rsi,%rdx
- # qhasm: zq1 = t81
- # asm 1: mov <t81=int64#5,>zq1=int64#4
- # asm 2: mov <t81=%r8,>zq1=%rcx
- mov %r8,%rcx
- # qhasm: zq2 = t82
- # asm 1: mov <t82=int64#6,>zq2=int64#9
- # asm 2: mov <t82=%r9,>zq2=%r11
- mov %r9,%r11
- # qhasm: zq3 = t83
- # asm 1: mov <t83=int64#7,>zq3=int64#10
- # asm 2: mov <t83=%rax,>zq3=%r12
- mov %rax,%r12
- # qhasm: zq4 = t84
- # asm 1: mov <t84=int64#8,>zq4=int64#11
- # asm 2: mov <t84=%r10,>zq4=%r13
- mov %r10,%r13
- # qhasm: zq0 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P0
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P0,<zq0=int64#3
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P0,<zq0=%rdx
- add crypto_scalarmult_curve25519_amd64_51_2P0,%rdx
- # qhasm: zq1 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq1=int64#4
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq1=%rcx
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%rcx
- # qhasm: zq2 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq2=int64#9
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq2=%r11
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r11
- # qhasm: zq3 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq3=int64#10
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq3=%r12
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r12
- # qhasm: zq4 += *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_2P1234
- # asm 1: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq4=int64#11
- # asm 2: add crypto_scalarmult_curve25519_amd64_51_2P1234,<zq4=%r13
- add crypto_scalarmult_curve25519_amd64_51_2P1234,%r13
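- # One pass now forms both quantities needed below:
- #   t8 <- t8 + t9           (sum)
- #   zq <- t8 + 2p - t9      (difference, kept non-negative by the 2p bias)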
- # qhasm: t80 += t90_stack
- # asm 1: addq <t90_stack=stack64#13,<t80=int64#2
- # asm 2: addq <t90_stack=96(%rsp),<t80=%rsi
- addq 96(%rsp),%rsi
- # qhasm: t81 += t91_stack
- # asm 1: addq <t91_stack=stack64#14,<t81=int64#5
- # asm 2: addq <t91_stack=104(%rsp),<t81=%r8
- addq 104(%rsp),%r8
- # qhasm: t82 += t92_stack
- # asm 1: addq <t92_stack=stack64#15,<t82=int64#6
- # asm 2: addq <t92_stack=112(%rsp),<t82=%r9
- addq 112(%rsp),%r9
- # qhasm: t83 += t93_stack
- # asm 1: addq <t93_stack=stack64#16,<t83=int64#7
- # asm 2: addq <t93_stack=120(%rsp),<t83=%rax
- addq 120(%rsp),%rax
- # qhasm: t84 += t94_stack
- # asm 1: addq <t94_stack=stack64#17,<t84=int64#8
- # asm 2: addq <t94_stack=128(%rsp),<t84=%r10
- addq 128(%rsp),%r10
- # qhasm: zq0 -= t90_stack
- # asm 1: subq <t90_stack=stack64#13,<zq0=int64#3
- # asm 2: subq <t90_stack=96(%rsp),<zq0=%rdx
- subq 96(%rsp),%rdx
- # qhasm: zq1 -= t91_stack
- # asm 1: subq <t91_stack=stack64#14,<zq1=int64#4
- # asm 2: subq <t91_stack=104(%rsp),<zq1=%rcx
- subq 104(%rsp),%rcx
- # qhasm: zq2 -= t92_stack
- # asm 1: subq <t92_stack=stack64#15,<zq2=int64#9
- # asm 2: subq <t92_stack=112(%rsp),<zq2=%r11
- subq 112(%rsp),%r11
- # qhasm: zq3 -= t93_stack
- # asm 1: subq <t93_stack=stack64#16,<zq3=int64#10
- # asm 2: subq <t93_stack=120(%rsp),<zq3=%r12
- subq 120(%rsp),%r12
- # qhasm: zq4 -= t94_stack
- # asm 1: subq <t94_stack=stack64#17,<zq4=int64#11
- # asm 2: subq <t94_stack=128(%rsp),<zq4=%r13
- subq 128(%rsp),%r13
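- # Store the pair: the sum overwrites workp+120..152 and the difference
- # overwrites workp+160..192; both are then squared in place.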
- # qhasm: *(uint64 *)(workp + 120) = t80
- # asm 1: movq <t80=int64#2,120(<workp=int64#1)
- # asm 2: movq <t80=%rsi,120(<workp=%rdi)
- movq %rsi,120(%rdi)
- # qhasm: *(uint64 *)(workp + 128) = t81
- # asm 1: movq <t81=int64#5,128(<workp=int64#1)
- # asm 2: movq <t81=%r8,128(<workp=%rdi)
- movq %r8,128(%rdi)
- # qhasm: *(uint64 *)(workp + 136) = t82
- # asm 1: movq <t82=int64#6,136(<workp=int64#1)
- # asm 2: movq <t82=%r9,136(<workp=%rdi)
- movq %r9,136(%rdi)
- # qhasm: *(uint64 *)(workp + 144) = t83
- # asm 1: movq <t83=int64#7,144(<workp=int64#1)
- # asm 2: movq <t83=%rax,144(<workp=%rdi)
- movq %rax,144(%rdi)
- # qhasm: *(uint64 *)(workp + 152) = t84
- # asm 1: movq <t84=int64#8,152(<workp=int64#1)
- # asm 2: movq <t84=%r10,152(<workp=%rdi)
- movq %r10,152(%rdi)
- # qhasm: *(uint64 *)(workp + 160) = zq0
- # asm 1: movq <zq0=int64#3,160(<workp=int64#1)
- # asm 2: movq <zq0=%rdx,160(<workp=%rdi)
- movq %rdx,160(%rdi)
- # qhasm: *(uint64 *)(workp + 168) = zq1
- # asm 1: movq <zq1=int64#4,168(<workp=int64#1)
- # asm 2: movq <zq1=%rcx,168(<workp=%rdi)
- movq %rcx,168(%rdi)
- # qhasm: *(uint64 *)(workp + 176) = zq2
- # asm 1: movq <zq2=int64#9,176(<workp=int64#1)
- # asm 2: movq <zq2=%r11,176(<workp=%rdi)
- movq %r11,176(%rdi)
- # qhasm: *(uint64 *)(workp + 184) = zq3
- # asm 1: movq <zq3=int64#10,184(<workp=int64#1)
- # asm 2: movq <zq3=%r12,184(<workp=%rdi)
- movq %r12,184(%rdi)
- # qhasm: *(uint64 *)(workp + 192) = zq4
- # asm 1: movq <zq4=int64#11,192(<workp=int64#1)
- # asm 2: movq <zq4=%r13,192(<workp=%rdi)
- movq %r13,192(%rdi)
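- # Square workp[120..152]. Schoolbook squaring: each cross product is
- # computed once with one operand doubled (shl $1); diagonal and cross
- # products that land in columns 5..8 are scaled by 19 and 38 = 2*19
- # respectively, again from 2^255 = 19 (mod p).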
- # qhasm: squarerax = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 120(<workp=%rdi),>squarerax=%rax
- movq 120(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 120)
- # asm 1: mulq 120(<workp=int64#1)
- # asm 2: mulq 120(<workp=%rdi)
- mulq 120(%rdi)
- # qhasm: xq0 = squarerax
- # asm 1: mov <squarerax=int64#7,>xq0=int64#2
- # asm 2: mov <squarerax=%rax,>xq0=%rsi
- mov %rax,%rsi
- # qhasm: squarer01 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer01=int64#4
- # asm 2: mov <squarerdx=%rdx,>squarer01=%rcx
- mov %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 120(<workp=%rdi),>squarerax=%rax
- movq 120(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 128)
- # asm 1: mulq 128(<workp=int64#1)
- # asm 2: mulq 128(<workp=%rdi)
- mulq 128(%rdi)
- # qhasm: xq1 = squarerax
- # asm 1: mov <squarerax=int64#7,>xq1=int64#5
- # asm 2: mov <squarerax=%rax,>xq1=%r8
- mov %rax,%r8
- # qhasm: squarer11 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer11=int64#6
- # asm 2: mov <squarerdx=%rdx,>squarer11=%r9
- mov %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 120(<workp=%rdi),>squarerax=%rax
- movq 120(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 136)
- # asm 1: mulq 136(<workp=int64#1)
- # asm 2: mulq 136(<workp=%rdi)
- mulq 136(%rdi)
- # qhasm: xq2 = squarerax
- # asm 1: mov <squarerax=int64#7,>xq2=int64#8
- # asm 2: mov <squarerax=%rax,>xq2=%r10
- mov %rax,%r10
- # qhasm: squarer21 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer21=int64#9
- # asm 2: mov <squarerdx=%rdx,>squarer21=%r11
- mov %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 120(<workp=%rdi),>squarerax=%rax
- movq 120(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 144)
- # asm 1: mulq 144(<workp=int64#1)
- # asm 2: mulq 144(<workp=%rdi)
- mulq 144(%rdi)
- # qhasm: xq3 = squarerax
- # asm 1: mov <squarerax=int64#7,>xq3=int64#10
- # asm 2: mov <squarerax=%rax,>xq3=%r12
- mov %rax,%r12
- # qhasm: squarer31 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer31=int64#11
- # asm 2: mov <squarerdx=%rdx,>squarer31=%r13
- mov %rdx,%r13
- # qhasm: squarerax = *(uint64 *)(workp + 120)
- # asm 1: movq 120(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 120(<workp=%rdi),>squarerax=%rax
- movq 120(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 152)
- # asm 1: mulq 152(<workp=int64#1)
- # asm 2: mulq 152(<workp=%rdi)
- mulq 152(%rdi)
- # qhasm: xq4 = squarerax
- # asm 1: mov <squarerax=int64#7,>xq4=int64#12
- # asm 2: mov <squarerax=%rax,>xq4=%r14
- mov %rax,%r14
- # qhasm: squarer41 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer41=int64#13
- # asm 2: mov <squarerdx=%rdx,>squarer41=%r15
- mov %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 128)
- # asm 1: movq 128(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 128(<workp=%rdi),>squarerax=%rax
- movq 128(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 128)
- # asm 1: mulq 128(<workp=int64#1)
- # asm 2: mulq 128(<workp=%rdi)
- mulq 128(%rdi)
- # qhasm: carry? xq2 += squarerax
- # asm 1: add <squarerax=int64#7,<xq2=int64#8
- # asm 2: add <squarerax=%rax,<xq2=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 128)
- # asm 1: movq 128(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 128(<workp=%rdi),>squarerax=%rax
- movq 128(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 136)
- # asm 1: mulq 136(<workp=int64#1)
- # asm 2: mulq 136(<workp=%rdi)
- mulq 136(%rdi)
- # qhasm: carry? xq3 += squarerax
- # asm 1: add <squarerax=int64#7,<xq3=int64#10
- # asm 2: add <squarerax=%rax,<xq3=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
- # qhasm: squarerax = *(uint64 *)(workp + 128)
- # asm 1: movq 128(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 128(<workp=%rdi),>squarerax=%rax
- movq 128(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 144)
- # asm 1: mulq 144(<workp=int64#1)
- # asm 2: mulq 144(<workp=%rdi)
- mulq 144(%rdi)
- # qhasm: carry? xq4 += squarerax
- # asm 1: add <squarerax=int64#7,<xq4=int64#12
- # asm 2: add <squarerax=%rax,<xq4=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 128)
- # asm 1: movq 128(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 128(<workp=%rdi),>squarerax=%rdx
- movq 128(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 152)
- # asm 1: mulq 152(<workp=int64#1)
- # asm 2: mulq 152(<workp=%rdi)
- mulq 152(%rdi)
- # qhasm: carry? xq0 += squarerax
- # asm 1: add <squarerax=int64#7,<xq0=int64#2
- # asm 2: add <squarerax=%rax,<xq0=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 136)
- # asm 1: movq 136(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 136(<workp=%rdi),>squarerax=%rax
- movq 136(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 136)
- # asm 1: mulq 136(<workp=int64#1)
- # asm 2: mulq 136(<workp=%rdi)
- mulq 136(%rdi)
- # qhasm: carry? xq4 += squarerax
- # asm 1: add <squarerax=int64#7,<xq4=int64#12
- # asm 2: add <squarerax=%rax,<xq4=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 136)
- # asm 1: movq 136(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 136(<workp=%rdi),>squarerax=%rdx
- movq 136(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 144)
- # asm 1: mulq 144(<workp=int64#1)
- # asm 2: mulq 144(<workp=%rdi)
- mulq 144(%rdi)
- # qhasm: carry? xq0 += squarerax
- # asm 1: add <squarerax=int64#7,<xq0=int64#2
- # asm 2: add <squarerax=%rax,<xq0=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 136)
- # asm 1: movq 136(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 136(<workp=%rdi),>squarerax=%rdx
- movq 136(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 152)
- # asm 1: mulq 152(<workp=int64#1)
- # asm 2: mulq 152(<workp=%rdi)
- mulq 152(%rdi)
- # qhasm: carry? xq1 += squarerax
- # asm 1: add <squarerax=int64#7,<xq1=int64#5
- # asm 2: add <squarerax=%rax,<xq1=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 144)
- # asm 1: movq 144(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 144(<workp=%rdi),>squarerax=%rdx
- movq 144(%rdi),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 144)
- # asm 1: mulq 144(<workp=int64#1)
- # asm 2: mulq 144(<workp=%rdi)
- mulq 144(%rdi)
- # qhasm: carry? xq1 += squarerax
- # asm 1: add <squarerax=int64#7,<xq1=int64#5
- # asm 2: add <squarerax=%rax,<xq1=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 144)
- # asm 1: movq 144(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 144(<workp=%rdi),>squarerax=%rdx
- movq 144(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 152)
- # asm 1: mulq 152(<workp=int64#1)
- # asm 2: mulq 152(<workp=%rdi)
- mulq 152(%rdi)
- # qhasm: carry? xq2 += squarerax
- # asm 1: add <squarerax=int64#7,<xq2=int64#8
- # asm 2: add <squarerax=%rax,<xq2=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 152)
- # asm 1: movq 152(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 152(<workp=%rdi),>squarerax=%rdx
- movq 152(%rdi),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 152)
- # asm 1: mulq 152(<workp=int64#1)
- # asm 2: mulq 152(<workp=%rdi)
- mulq 152(%rdi)
- # qhasm: carry? xq3 += squarerax
- # asm 1: add <squarerax=int64#7,<xq3=int64#10
- # asm 2: add <squarerax=%rax,<xq3=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
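- # Reduce the square with the same shld/REDMASK51/*19 sequence used after
- # the multiplication above; squaret plays the role of the carry register.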
- # qhasm: squareredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: squarer01 = (squarer01.xq0) << 13
- # asm 1: shld $13,<xq0=int64#2,<squarer01=int64#4
- # asm 2: shld $13,<xq0=%rsi,<squarer01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: xq0 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq0=int64#2
- # asm 2: and <squareredmask=%rdx,<xq0=%rsi
- and %rdx,%rsi
- # qhasm: squarer11 = (squarer11.xq1) << 13
- # asm 1: shld $13,<xq1=int64#5,<squarer11=int64#6
- # asm 2: shld $13,<xq1=%r8,<squarer11=%r9
- shld $13,%r8,%r9
- # qhasm: xq1 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq1=int64#5
- # asm 2: and <squareredmask=%rdx,<xq1=%r8
- and %rdx,%r8
- # qhasm: xq1 += squarer01
- # asm 1: add <squarer01=int64#4,<xq1=int64#5
- # asm 2: add <squarer01=%rcx,<xq1=%r8
- add %rcx,%r8
- # qhasm: squarer21 = (squarer21.xq2) << 13
- # asm 1: shld $13,<xq2=int64#8,<squarer21=int64#9
- # asm 2: shld $13,<xq2=%r10,<squarer21=%r11
- shld $13,%r10,%r11
- # qhasm: xq2 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq2=int64#8
- # asm 2: and <squareredmask=%rdx,<xq2=%r10
- and %rdx,%r10
- # qhasm: xq2 += squarer11
- # asm 1: add <squarer11=int64#6,<xq2=int64#8
- # asm 2: add <squarer11=%r9,<xq2=%r10
- add %r9,%r10
- # qhasm: squarer31 = (squarer31.xq3) << 13
- # asm 1: shld $13,<xq3=int64#10,<squarer31=int64#11
- # asm 2: shld $13,<xq3=%r12,<squarer31=%r13
- shld $13,%r12,%r13
- # qhasm: xq3 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq3=int64#10
- # asm 2: and <squareredmask=%rdx,<xq3=%r12
- and %rdx,%r12
- # qhasm: xq3 += squarer21
- # asm 1: add <squarer21=int64#9,<xq3=int64#10
- # asm 2: add <squarer21=%r11,<xq3=%r12
- add %r11,%r12
- # qhasm: squarer41 = (squarer41.xq4) << 13
- # asm 1: shld $13,<xq4=int64#12,<squarer41=int64#13
- # asm 2: shld $13,<xq4=%r14,<squarer41=%r15
- shld $13,%r14,%r15
- # qhasm: xq4 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq4=int64#12
- # asm 2: and <squareredmask=%rdx,<xq4=%r14
- and %rdx,%r14
- # qhasm: xq4 += squarer31
- # asm 1: add <squarer31=int64#11,<xq4=int64#12
- # asm 2: add <squarer31=%r13,<xq4=%r14
- add %r13,%r14
- # qhasm: squarer41 = squarer41 * 19
- # asm 1: imulq $19,<squarer41=int64#13,>squarer41=int64#4
- # asm 2: imulq $19,<squarer41=%r15,>squarer41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: xq0 += squarer41
- # asm 1: add <squarer41=int64#4,<xq0=int64#2
- # asm 2: add <squarer41=%rcx,<xq0=%rsi
- add %rcx,%rsi
- # qhasm: squaret = xq0
- # asm 1: mov <xq0=int64#2,>squaret=int64#4
- # asm 2: mov <xq0=%rsi,>squaret=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += xq1
- # asm 1: add <xq1=int64#5,<squaret=int64#4
- # asm 2: add <xq1=%r8,<squaret=%rcx
- add %r8,%rcx
- # qhasm: xq0 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq0=int64#2
- # asm 2: and <squareredmask=%rdx,<xq0=%rsi
- and %rdx,%rsi
- # qhasm: xq1 = squaret
- # asm 1: mov <squaret=int64#4,>xq1=int64#5
- # asm 2: mov <squaret=%rcx,>xq1=%r8
- mov %rcx,%r8
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += xq2
- # asm 1: add <xq2=int64#8,<squaret=int64#4
- # asm 2: add <xq2=%r10,<squaret=%rcx
- add %r10,%rcx
- # qhasm: xq1 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq1=int64#5
- # asm 2: and <squareredmask=%rdx,<xq1=%r8
- and %rdx,%r8
- # qhasm: xq2 = squaret
- # asm 1: mov <squaret=int64#4,>xq2=int64#6
- # asm 2: mov <squaret=%rcx,>xq2=%r9
- mov %rcx,%r9
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += xq3
- # asm 1: add <xq3=int64#10,<squaret=int64#4
- # asm 2: add <xq3=%r12,<squaret=%rcx
- add %r12,%rcx
- # qhasm: xq2 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq2=int64#6
- # asm 2: and <squareredmask=%rdx,<xq2=%r9
- and %rdx,%r9
- # qhasm: xq3 = squaret
- # asm 1: mov <squaret=int64#4,>xq3=int64#7
- # asm 2: mov <squaret=%rcx,>xq3=%rax
- mov %rcx,%rax
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += xq4
- # asm 1: add <xq4=int64#12,<squaret=int64#4
- # asm 2: add <xq4=%r14,<squaret=%rcx
- add %r14,%rcx
- # qhasm: xq3 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq3=int64#7
- # asm 2: and <squareredmask=%rdx,<xq3=%rax
- and %rdx,%rax
- # qhasm: xq4 = squaret
- # asm 1: mov <squaret=int64#4,>xq4=int64#8
- # asm 2: mov <squaret=%rcx,>xq4=%r10
- mov %rcx,%r10
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret *= 19
- # asm 1: imulq $19,<squaret=int64#4,>squaret=int64#4
- # asm 2: imulq $19,<squaret=%rcx,>squaret=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: xq0 += squaret
- # asm 1: add <squaret=int64#4,<xq0=int64#2
- # asm 2: add <squaret=%rcx,<xq0=%rsi
- add %rcx,%rsi
- # qhasm: xq4 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<xq4=int64#8
- # asm 2: and <squareredmask=%rdx,<xq4=%r10
- and %rdx,%r10
- # qhasm: *(uint64 *)(workp + 120) = xq0
- # asm 1: movq <xq0=int64#2,120(<workp=int64#1)
- # asm 2: movq <xq0=%rsi,120(<workp=%rdi)
- movq %rsi,120(%rdi)
- # qhasm: *(uint64 *)(workp + 128) = xq1
- # asm 1: movq <xq1=int64#5,128(<workp=int64#1)
- # asm 2: movq <xq1=%r8,128(<workp=%rdi)
- movq %r8,128(%rdi)
- # qhasm: *(uint64 *)(workp + 136) = xq2
- # asm 1: movq <xq2=int64#6,136(<workp=int64#1)
- # asm 2: movq <xq2=%r9,136(<workp=%rdi)
- movq %r9,136(%rdi)
- # qhasm: *(uint64 *)(workp + 144) = xq3
- # asm 1: movq <xq3=int64#7,144(<workp=int64#1)
- # asm 2: movq <xq3=%rax,144(<workp=%rdi)
- movq %rax,144(%rdi)
- # qhasm: *(uint64 *)(workp + 152) = xq4
- # asm 1: movq <xq4=int64#8,152(<workp=int64#1)
- # asm 2: movq <xq4=%r10,152(<workp=%rdi)
- movq %r10,152(%rdi)
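- # (sum)^2 is written back over workp+120..152. The identical squaring
- # and reduction is now applied to the difference at workp+160..192.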
- # qhasm: squarerax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 160(<workp=%rdi),>squarerax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 160)
- # asm 1: mulq 160(<workp=int64#1)
- # asm 2: mulq 160(<workp=%rdi)
- mulq 160(%rdi)
- # qhasm: zq0 = squarerax
- # asm 1: mov <squarerax=int64#7,>zq0=int64#2
- # asm 2: mov <squarerax=%rax,>zq0=%rsi
- mov %rax,%rsi
- # qhasm: squarer01 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer01=int64#4
- # asm 2: mov <squarerdx=%rdx,>squarer01=%rcx
- mov %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 160(<workp=%rdi),>squarerax=%rax
- movq 160(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 168)
- # asm 1: mulq 168(<workp=int64#1)
- # asm 2: mulq 168(<workp=%rdi)
- mulq 168(%rdi)
- # qhasm: zq1 = squarerax
- # asm 1: mov <squarerax=int64#7,>zq1=int64#5
- # asm 2: mov <squarerax=%rax,>zq1=%r8
- mov %rax,%r8
- # qhasm: squarer11 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer11=int64#6
- # asm 2: mov <squarerdx=%rdx,>squarer11=%r9
- mov %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 160(<workp=%rdi),>squarerax=%rax
- movq 160(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 176)
- # asm 1: mulq 176(<workp=int64#1)
- # asm 2: mulq 176(<workp=%rdi)
- mulq 176(%rdi)
- # qhasm: zq2 = squarerax
- # asm 1: mov <squarerax=int64#7,>zq2=int64#8
- # asm 2: mov <squarerax=%rax,>zq2=%r10
- mov %rax,%r10
- # qhasm: squarer21 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer21=int64#9
- # asm 2: mov <squarerdx=%rdx,>squarer21=%r11
- mov %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 160(<workp=%rdi),>squarerax=%rax
- movq 160(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 184)
- # asm 1: mulq 184(<workp=int64#1)
- # asm 2: mulq 184(<workp=%rdi)
- mulq 184(%rdi)
- # qhasm: zq3 = squarerax
- # asm 1: mov <squarerax=int64#7,>zq3=int64#10
- # asm 2: mov <squarerax=%rax,>zq3=%r12
- mov %rax,%r12
- # qhasm: squarer31 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer31=int64#11
- # asm 2: mov <squarerdx=%rdx,>squarer31=%r13
- mov %rdx,%r13
- # qhasm: squarerax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 160(<workp=%rdi),>squarerax=%rax
- movq 160(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 192)
- # asm 1: mulq 192(<workp=int64#1)
- # asm 2: mulq 192(<workp=%rdi)
- mulq 192(%rdi)
- # qhasm: zq4 = squarerax
- # asm 1: mov <squarerax=int64#7,>zq4=int64#12
- # asm 2: mov <squarerax=%rax,>zq4=%r14
- mov %rax,%r14
- # qhasm: squarer41 = squarerdx
- # asm 1: mov <squarerdx=int64#3,>squarer41=int64#13
- # asm 2: mov <squarerdx=%rdx,>squarer41=%r15
- mov %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 168(<workp=%rdi),>squarerax=%rax
- movq 168(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 168)
- # asm 1: mulq 168(<workp=int64#1)
- # asm 2: mulq 168(<workp=%rdi)
- mulq 168(%rdi)
- # qhasm: carry? zq2 += squarerax
- # asm 1: add <squarerax=int64#7,<zq2=int64#8
- # asm 2: add <squarerax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 168(<workp=%rdi),>squarerax=%rax
- movq 168(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 176)
- # asm 1: mulq 176(<workp=int64#1)
- # asm 2: mulq 176(<workp=%rdi)
- mulq 176(%rdi)
- # qhasm: carry? zq3 += squarerax
- # asm 1: add <squarerax=int64#7,<zq3=int64#10
- # asm 2: add <squarerax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
- # qhasm: squarerax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 168(<workp=%rdi),>squarerax=%rax
- movq 168(%rdi),%rax
- # qhasm: squarerax <<= 1
- # asm 1: shl $1,<squarerax=int64#7
- # asm 2: shl $1,<squarerax=%rax
- shl $1,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 184)
- # asm 1: mulq 184(<workp=int64#1)
- # asm 2: mulq 184(<workp=%rdi)
- mulq 184(%rdi)
- # qhasm: carry? zq4 += squarerax
- # asm 1: add <squarerax=int64#7,<zq4=int64#12
- # asm 2: add <squarerax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 168(<workp=%rdi),>squarerax=%rdx
- movq 168(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 192)
- # asm 1: mulq 192(<workp=int64#1)
- # asm 2: mulq 192(<workp=%rdi)
- mulq 192(%rdi)
- # qhasm: carry? zq0 += squarerax
- # asm 1: add <squarerax=int64#7,<zq0=int64#2
- # asm 2: add <squarerax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>squarerax=int64#7
- # asm 2: movq 176(<workp=%rdi),>squarerax=%rax
- movq 176(%rdi),%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 176)
- # asm 1: mulq 176(<workp=int64#1)
- # asm 2: mulq 176(<workp=%rdi)
- mulq 176(%rdi)
- # qhasm: carry? zq4 += squarerax
- # asm 1: add <squarerax=int64#7,<zq4=int64#12
- # asm 2: add <squarerax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: squarer41 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer41=int64#13
- # asm 2: adc <squarerdx=%rdx,<squarer41=%r15
- adc %rdx,%r15
- # qhasm: squarerax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 176(<workp=%rdi),>squarerax=%rdx
- movq 176(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 184)
- # asm 1: mulq 184(<workp=int64#1)
- # asm 2: mulq 184(<workp=%rdi)
- mulq 184(%rdi)
- # qhasm: carry? zq0 += squarerax
- # asm 1: add <squarerax=int64#7,<zq0=int64#2
- # asm 2: add <squarerax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: squarer01 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer01=int64#4
- # asm 2: adc <squarerdx=%rdx,<squarer01=%rcx
- adc %rdx,%rcx
- # qhasm: squarerax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 176(<workp=%rdi),>squarerax=%rdx
- movq 176(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 192)
- # asm 1: mulq 192(<workp=int64#1)
- # asm 2: mulq 192(<workp=%rdi)
- mulq 192(%rdi)
- # qhasm: carry? zq1 += squarerax
- # asm 1: add <squarerax=int64#7,<zq1=int64#5
- # asm 2: add <squarerax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 184)
- # asm 1: movq 184(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 184(<workp=%rdi),>squarerax=%rdx
- movq 184(%rdi),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 184)
- # asm 1: mulq 184(<workp=int64#1)
- # asm 2: mulq 184(<workp=%rdi)
- mulq 184(%rdi)
- # qhasm: carry? zq1 += squarerax
- # asm 1: add <squarerax=int64#7,<zq1=int64#5
- # asm 2: add <squarerax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: squarer11 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer11=int64#6
- # asm 2: adc <squarerdx=%rdx,<squarer11=%r9
- adc %rdx,%r9
- # qhasm: squarerax = *(uint64 *)(workp + 184)
- # asm 1: movq 184(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 184(<workp=%rdi),>squarerax=%rdx
- movq 184(%rdi),%rdx
- # qhasm: squarerax *= 38
- # asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
- imulq $38,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 192)
- # asm 1: mulq 192(<workp=int64#1)
- # asm 2: mulq 192(<workp=%rdi)
- mulq 192(%rdi)
- # qhasm: carry? zq2 += squarerax
- # asm 1: add <squarerax=int64#7,<zq2=int64#8
- # asm 2: add <squarerax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: squarer21 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer21=int64#9
- # asm 2: adc <squarerdx=%rdx,<squarer21=%r11
- adc %rdx,%r11
- # qhasm: squarerax = *(uint64 *)(workp + 192)
- # asm 1: movq 192(<workp=int64#1),>squarerax=int64#3
- # asm 2: movq 192(<workp=%rdi),>squarerax=%rdx
- movq 192(%rdi),%rdx
- # qhasm: squarerax *= 19
- # asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
- # asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(workp + 192)
- # asm 1: mulq 192(<workp=int64#1)
- # asm 2: mulq 192(<workp=%rdi)
- mulq 192(%rdi)
- # qhasm: carry? zq3 += squarerax
- # asm 1: add <squarerax=int64#7,<zq3=int64#10
- # asm 2: add <squarerax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: squarer31 += squarerdx + carry
- # asm 1: adc <squarerdx=int64#3,<squarer31=int64#11
- # asm 2: adc <squarerdx=%rdx,<squarer31=%r13
- adc %rdx,%r13
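- # Column sums of the second square are complete; reduce to radix-2^51
- # exactly as before.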
- # qhasm: squareredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>squareredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: squarer01 = (squarer01.zq0) << 13
- # asm 1: shld $13,<zq0=int64#2,<squarer01=int64#4
- # asm 2: shld $13,<zq0=%rsi,<squarer01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: zq0 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq0=int64#2
- # asm 2: and <squareredmask=%rdx,<zq0=%rsi
- and %rdx,%rsi
- # qhasm: squarer11 = (squarer11.zq1) << 13
- # asm 1: shld $13,<zq1=int64#5,<squarer11=int64#6
- # asm 2: shld $13,<zq1=%r8,<squarer11=%r9
- shld $13,%r8,%r9
- # qhasm: zq1 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq1=int64#5
- # asm 2: and <squareredmask=%rdx,<zq1=%r8
- and %rdx,%r8
- # qhasm: zq1 += squarer01
- # asm 1: add <squarer01=int64#4,<zq1=int64#5
- # asm 2: add <squarer01=%rcx,<zq1=%r8
- add %rcx,%r8
- # qhasm: squarer21 = (squarer21.zq2) << 13
- # asm 1: shld $13,<zq2=int64#8,<squarer21=int64#9
- # asm 2: shld $13,<zq2=%r10,<squarer21=%r11
- shld $13,%r10,%r11
- # qhasm: zq2 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq2=int64#8
- # asm 2: and <squareredmask=%rdx,<zq2=%r10
- and %rdx,%r10
- # qhasm: zq2 += squarer11
- # asm 1: add <squarer11=int64#6,<zq2=int64#8
- # asm 2: add <squarer11=%r9,<zq2=%r10
- add %r9,%r10
- # qhasm: squarer31 = (squarer31.zq3) << 13
- # asm 1: shld $13,<zq3=int64#10,<squarer31=int64#11
- # asm 2: shld $13,<zq3=%r12,<squarer31=%r13
- shld $13,%r12,%r13
- # qhasm: zq3 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq3=int64#10
- # asm 2: and <squareredmask=%rdx,<zq3=%r12
- and %rdx,%r12
- # qhasm: zq3 += squarer21
- # asm 1: add <squarer21=int64#9,<zq3=int64#10
- # asm 2: add <squarer21=%r11,<zq3=%r12
- add %r11,%r12
- # qhasm: squarer41 = (squarer41.zq4) << 13
- # asm 1: shld $13,<zq4=int64#12,<squarer41=int64#13
- # asm 2: shld $13,<zq4=%r14,<squarer41=%r15
- shld $13,%r14,%r15
- # qhasm: zq4 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq4=int64#12
- # asm 2: and <squareredmask=%rdx,<zq4=%r14
- and %rdx,%r14
- # qhasm: zq4 += squarer31
- # asm 1: add <squarer31=int64#11,<zq4=int64#12
- # asm 2: add <squarer31=%r13,<zq4=%r14
- add %r13,%r14
- # qhasm: squarer41 = squarer41 * 19
- # asm 1: imulq $19,<squarer41=int64#13,>squarer41=int64#4
- # asm 2: imulq $19,<squarer41=%r15,>squarer41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: zq0 += squarer41
- # asm 1: add <squarer41=int64#4,<zq0=int64#2
- # asm 2: add <squarer41=%rcx,<zq0=%rsi
- add %rcx,%rsi
- # qhasm: squaret = zq0
- # asm 1: mov <zq0=int64#2,>squaret=int64#4
- # asm 2: mov <zq0=%rsi,>squaret=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += zq1
- # asm 1: add <zq1=int64#5,<squaret=int64#4
- # asm 2: add <zq1=%r8,<squaret=%rcx
- add %r8,%rcx
- # qhasm: zq0 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq0=int64#2
- # asm 2: and <squareredmask=%rdx,<zq0=%rsi
- and %rdx,%rsi
- # qhasm: zq1 = squaret
- # asm 1: mov <squaret=int64#4,>zq1=int64#5
- # asm 2: mov <squaret=%rcx,>zq1=%r8
- mov %rcx,%r8
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += zq2
- # asm 1: add <zq2=int64#8,<squaret=int64#4
- # asm 2: add <zq2=%r10,<squaret=%rcx
- add %r10,%rcx
- # qhasm: zq1 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq1=int64#5
- # asm 2: and <squareredmask=%rdx,<zq1=%r8
- and %rdx,%r8
- # qhasm: zq2 = squaret
- # asm 1: mov <squaret=int64#4,>zq2=int64#6
- # asm 2: mov <squaret=%rcx,>zq2=%r9
- mov %rcx,%r9
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += zq3
- # asm 1: add <zq3=int64#10,<squaret=int64#4
- # asm 2: add <zq3=%r12,<squaret=%rcx
- add %r12,%rcx
- # qhasm: zq2 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq2=int64#6
- # asm 2: and <squareredmask=%rdx,<zq2=%r9
- and %rdx,%r9
- # qhasm: zq3 = squaret
- # asm 1: mov <squaret=int64#4,>zq3=int64#7
- # asm 2: mov <squaret=%rcx,>zq3=%rax
- mov %rcx,%rax
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret += zq4
- # asm 1: add <zq4=int64#12,<squaret=int64#4
- # asm 2: add <zq4=%r14,<squaret=%rcx
- add %r14,%rcx
- # qhasm: zq3 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq3=int64#7
- # asm 2: and <squareredmask=%rdx,<zq3=%rax
- and %rdx,%rax
- # qhasm: zq4 = squaret
- # asm 1: mov <squaret=int64#4,>zq4=int64#8
- # asm 2: mov <squaret=%rcx,>zq4=%r10
- mov %rcx,%r10
- # qhasm: (uint64) squaret >>= 51
- # asm 1: shr $51,<squaret=int64#4
- # asm 2: shr $51,<squaret=%rcx
- shr $51,%rcx
- # qhasm: squaret *= 19
- # asm 1: imulq $19,<squaret=int64#4,>squaret=int64#4
- # asm 2: imulq $19,<squaret=%rcx,>squaret=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: zq0 += squaret
- # asm 1: add <squaret=int64#4,<zq0=int64#2
- # asm 2: add <squaret=%rcx,<zq0=%rsi
- add %rcx,%rsi
- # qhasm: zq4 &= squareredmask
- # asm 1: and <squareredmask=int64#3,<zq4=int64#8
- # asm 2: and <squareredmask=%rdx,<zq4=%r10
- and %rdx,%r10
- # qhasm: *(uint64 *)(workp + 160) = zq0
- # asm 1: movq <zq0=int64#2,160(<workp=int64#1)
- # asm 2: movq <zq0=%rsi,160(<workp=%rdi)
- movq %rsi,160(%rdi)
- # qhasm: *(uint64 *)(workp + 168) = zq1
- # asm 1: movq <zq1=int64#5,168(<workp=int64#1)
- # asm 2: movq <zq1=%r8,168(<workp=%rdi)
- movq %r8,168(%rdi)
- # qhasm: *(uint64 *)(workp + 176) = zq2
- # asm 1: movq <zq2=int64#6,176(<workp=int64#1)
- # asm 2: movq <zq2=%r9,176(<workp=%rdi)
- movq %r9,176(%rdi)
- # qhasm: *(uint64 *)(workp + 184) = zq3
- # asm 1: movq <zq3=int64#7,184(<workp=int64#1)
- # asm 2: movq <zq3=%rax,184(<workp=%rdi)
- movq %rax,184(%rdi)
- # qhasm: *(uint64 *)(workp + 192) = zq4
- # asm 1: movq <zq4=int64#8,192(<workp=int64#1)
- # asm 2: movq <zq4=%r10,192(<workp=%rdi)
- movq %r10,192(%rdi)
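- # (difference)^2 is stored at workp+160..192. It is now multiplied by
- # the field element at workp+0..32; limbs 3 and 4 of the square are
- # scaled by 19 up front and cached in mulx319_stack/mulx419_stack for
- # the products that wrap past column 4.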
- # qhasm: mulrax = *(uint64 *)(workp + 184)
- # asm 1: movq 184(<workp=int64#1),>mulrax=int64#2
- # asm 2: movq 184(<workp=%rdi),>mulrax=%rsi
- movq 184(%rdi),%rsi
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
- imulq $19,%rsi,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 16)
- # asm 1: mulq 16(<workp=int64#1)
- # asm 2: mulq 16(<workp=%rdi)
- mulq 16(%rdi)
- # qhasm: zq0 = mulrax
- # asm 1: mov <mulrax=int64#7,>zq0=int64#2
- # asm 2: mov <mulrax=%rax,>zq0=%rsi
- mov %rax,%rsi
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
- # asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
- mov %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 192)
- # asm 1: movq 192(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 192(<workp=%rdi),>mulrax=%rdx
- movq 192(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 8)
- # asm 1: mulq 8(<workp=int64#1)
- # asm 2: mulq 8(<workp=%rdi)
- mulq 8(%rdi)
- # qhasm: carry? zq0 += mulrax
- # asm 1: add <mulrax=int64#7,<zq0=int64#2
- # asm 2: add <mulrax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 160(<workp=%rdi),>mulrax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 0)
- # asm 1: mulq 0(<workp=int64#1)
- # asm 2: mulq 0(<workp=%rdi)
- mulq 0(%rdi)
- # qhasm: carry? zq0 += mulrax
- # asm 1: add <mulrax=int64#7,<zq0=int64#2
- # asm 2: add <mulrax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 160(<workp=%rdi),>mulrax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 8)
- # asm 1: mulq 8(<workp=int64#1)
- # asm 2: mulq 8(<workp=%rdi)
- mulq 8(%rdi)
- # qhasm: zq1 = mulrax
- # asm 1: mov <mulrax=int64#7,>zq1=int64#5
- # asm 2: mov <mulrax=%rax,>zq1=%r8
- mov %rax,%r8
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r9
- mov %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 160(<workp=%rdi),>mulrax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 16)
- # asm 1: mulq 16(<workp=int64#1)
- # asm 2: mulq 16(<workp=%rdi)
- mulq 16(%rdi)
- # qhasm: zq2 = mulrax
- # asm 1: mov <mulrax=int64#7,>zq2=int64#8
- # asm 2: mov <mulrax=%rax,>zq2=%r10
- mov %rax,%r10
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r11
- mov %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 160(<workp=%rdi),>mulrax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 24)
- # asm 1: mulq 24(<workp=int64#1)
- # asm 2: mulq 24(<workp=%rdi)
- mulq 24(%rdi)
- # qhasm: zq3 = mulrax
- # asm 1: mov <mulrax=int64#7,>zq3=int64#10
- # asm 2: mov <mulrax=%rax,>zq3=%r12
- mov %rax,%r12
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r13
- mov %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 160)
- # asm 1: movq 160(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 160(<workp=%rdi),>mulrax=%rax
- movq 160(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 32)
- # asm 1: mulq 32(<workp=int64#1)
- # asm 2: mulq 32(<workp=%rdi)
- mulq 32(%rdi)
- # qhasm: zq4 = mulrax
- # asm 1: mov <mulrax=int64#7,>zq4=int64#12
- # asm 2: mov <mulrax=%rax,>zq4=%r14
- mov %rax,%r14
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
- # asm 2: mov <mulrdx=%rdx,>mulr41=%r15
- mov %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 168(<workp=%rdi),>mulrax=%rax
- movq 168(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 0)
- # asm 1: mulq 0(<workp=int64#1)
- # asm 2: mulq 0(<workp=%rdi)
- mulq 0(%rdi)
- # qhasm: carry? zq1 += mulrax
- # asm 1: add <mulrax=int64#7,<zq1=int64#5
- # asm 2: add <mulrax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 168(<workp=%rdi),>mulrax=%rax
- movq 168(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 8)
- # asm 1: mulq 8(<workp=int64#1)
- # asm 2: mulq 8(<workp=%rdi)
- mulq 8(%rdi)
- # qhasm: carry? zq2 += mulrax
- # asm 1: add <mulrax=int64#7,<zq2=int64#8
- # asm 2: add <mulrax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 168(<workp=%rdi),>mulrax=%rax
- movq 168(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 16)
- # asm 1: mulq 16(<workp=int64#1)
- # asm 2: mulq 16(<workp=%rdi)
- mulq 16(%rdi)
- # qhasm: carry? zq3 += mulrax
- # asm 1: add <mulrax=int64#7,<zq3=int64#10
- # asm 2: add <mulrax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 168(<workp=%rdi),>mulrax=%rax
- movq 168(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 24)
- # asm 1: mulq 24(<workp=int64#1)
- # asm 2: mulq 24(<workp=%rdi)
- mulq 24(%rdi)
- # qhasm: carry? zq4 += mulrax
- # asm 1: add <mulrax=int64#7,<zq4=int64#12
- # asm 2: add <mulrax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 168)
- # asm 1: movq 168(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 168(<workp=%rdi),>mulrax=%rdx
- movq 168(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 32)
- # asm 1: mulq 32(<workp=int64#1)
- # asm 2: mulq 32(<workp=%rdi)
- mulq 32(%rdi)
- # qhasm: carry? zq0 += mulrax
- # asm 1: add <mulrax=int64#7,<zq0=int64#2
- # asm 2: add <mulrax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 176(<workp=%rdi),>mulrax=%rax
- movq 176(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 0)
- # asm 1: mulq 0(<workp=int64#1)
- # asm 2: mulq 0(<workp=%rdi)
- mulq 0(%rdi)
- # qhasm: carry? zq2 += mulrax
- # asm 1: add <mulrax=int64#7,<zq2=int64#8
- # asm 2: add <mulrax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 176(<workp=%rdi),>mulrax=%rax
- movq 176(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 8)
- # asm 1: mulq 8(<workp=int64#1)
- # asm 2: mulq 8(<workp=%rdi)
- mulq 8(%rdi)
- # qhasm: carry? zq3 += mulrax
- # asm 1: add <mulrax=int64#7,<zq3=int64#10
- # asm 2: add <mulrax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 176(<workp=%rdi),>mulrax=%rax
- movq 176(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 16)
- # asm 1: mulq 16(<workp=int64#1)
- # asm 2: mulq 16(<workp=%rdi)
- mulq 16(%rdi)
- # qhasm: carry? zq4 += mulrax
- # asm 1: add <mulrax=int64#7,<zq4=int64#12
- # asm 2: add <mulrax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 176(<workp=%rdi),>mulrax=%rdx
- movq 176(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 24)
- # asm 1: mulq 24(<workp=int64#1)
- # asm 2: mulq 24(<workp=%rdi)
- mulq 24(%rdi)
- # qhasm: carry? zq0 += mulrax
- # asm 1: add <mulrax=int64#7,<zq0=int64#2
- # asm 2: add <mulrax=%rax,<zq0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 176)
- # asm 1: movq 176(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 176(<workp=%rdi),>mulrax=%rdx
- movq 176(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 32)
- # asm 1: mulq 32(<workp=int64#1)
- # asm 2: mulq 32(<workp=%rdi)
- mulq 32(%rdi)
- # qhasm: carry? zq1 += mulrax
- # asm 1: add <mulrax=int64#7,<zq1=int64#5
- # asm 2: add <mulrax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 184)
- # asm 1: movq 184(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 184(<workp=%rdi),>mulrax=%rax
- movq 184(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 0)
- # asm 1: mulq 0(<workp=int64#1)
- # asm 2: mulq 0(<workp=%rdi)
- mulq 0(%rdi)
- # qhasm: carry? zq3 += mulrax
- # asm 1: add <mulrax=int64#7,<zq3=int64#10
- # asm 2: add <mulrax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 184)
- # asm 1: movq 184(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 184(<workp=%rdi),>mulrax=%rax
- movq 184(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 8)
- # asm 1: mulq 8(<workp=int64#1)
- # asm 2: mulq 8(<workp=%rdi)
- mulq 8(%rdi)
- # qhasm: carry? zq4 += mulrax
- # asm 1: add <mulrax=int64#7,<zq4=int64#12
- # asm 2: add <mulrax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 24)
- # asm 1: mulq 24(<workp=int64#1)
- # asm 2: mulq 24(<workp=%rdi)
- mulq 24(%rdi)
- # qhasm: carry? zq1 += mulrax
- # asm 1: add <mulrax=int64#7,<zq1=int64#5
- # asm 2: add <mulrax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 32)
- # asm 1: mulq 32(<workp=int64#1)
- # asm 2: mulq 32(<workp=%rdi)
- mulq 32(%rdi)
- # qhasm: carry? zq2 += mulrax
- # asm 1: add <mulrax=int64#7,<zq2=int64#8
- # asm 2: add <mulrax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 192)
- # asm 1: movq 192(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 192(<workp=%rdi),>mulrax=%rax
- movq 192(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 0)
- # asm 1: mulq 0(<workp=int64#1)
- # asm 2: mulq 0(<workp=%rdi)
- mulq 0(%rdi)
- # qhasm: carry? zq4 += mulrax
- # asm 1: add <mulrax=int64#7,<zq4=int64#12
- # asm 2: add <mulrax=%rax,<zq4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 16)
- # asm 1: mulq 16(<workp=int64#1)
- # asm 2: mulq 16(<workp=%rdi)
- mulq 16(%rdi)
- # qhasm: carry? zq1 += mulrax
- # asm 1: add <mulrax=int64#7,<zq1=int64#5
- # asm 2: add <mulrax=%rax,<zq1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 24)
- # asm 1: mulq 24(<workp=int64#1)
- # asm 2: mulq 24(<workp=%rdi)
- mulq 24(%rdi)
- # qhasm: carry? zq2 += mulrax
- # asm 1: add <mulrax=int64#7,<zq2=int64#8
- # asm 2: add <mulrax=%rax,<zq2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(workp + 32)
- # asm 1: mulq 32(<workp=int64#1)
- # asm 2: mulq 32(<workp=%rdi)
- mulq 32(%rdi)
- # qhasm: carry? zq3 += mulrax
- # asm 1: add <mulrax=int64#7,<zq3=int64#10
- # asm 2: add <mulrax=%rax,<zq3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
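- # All partial products are accumulated: (mulr01:zq0) .. (mulr41:zq4)
- # now hold five 128-bit column sums. Reduction, phase 1: each shld $13
- # moves the bits above position 51 of a column into its carry word,
- # the column is masked to 51 bits with REDMASK51 (2^51 - 1), the carry
- # word is added into the next column, and the carry out of column 4 is
- # multiplied by 19 and folded back into column 0.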
- # qhasm: mulredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: mulr01 = (mulr01.zq0) << 13
- # asm 1: shld $13,<zq0=int64#2,<mulr01=int64#4
- # asm 2: shld $13,<zq0=%rsi,<mulr01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: zq0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq0=int64#2
- # asm 2: and <mulredmask=%rdx,<zq0=%rsi
- and %rdx,%rsi
- # qhasm: mulr11 = (mulr11.zq1) << 13
- # asm 1: shld $13,<zq1=int64#5,<mulr11=int64#6
- # asm 2: shld $13,<zq1=%r8,<mulr11=%r9
- shld $13,%r8,%r9
- # qhasm: zq1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq1=int64#5
- # asm 2: and <mulredmask=%rdx,<zq1=%r8
- and %rdx,%r8
- # qhasm: zq1 += mulr01
- # asm 1: add <mulr01=int64#4,<zq1=int64#5
- # asm 2: add <mulr01=%rcx,<zq1=%r8
- add %rcx,%r8
- # qhasm: mulr21 = (mulr21.zq2) << 13
- # asm 1: shld $13,<zq2=int64#8,<mulr21=int64#9
- # asm 2: shld $13,<zq2=%r10,<mulr21=%r11
- shld $13,%r10,%r11
- # qhasm: zq2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq2=int64#8
- # asm 2: and <mulredmask=%rdx,<zq2=%r10
- and %rdx,%r10
- # qhasm: zq2 += mulr11
- # asm 1: add <mulr11=int64#6,<zq2=int64#8
- # asm 2: add <mulr11=%r9,<zq2=%r10
- add %r9,%r10
- # qhasm: mulr31 = (mulr31.zq3) << 13
- # asm 1: shld $13,<zq3=int64#10,<mulr31=int64#11
- # asm 2: shld $13,<zq3=%r12,<mulr31=%r13
- shld $13,%r12,%r13
- # qhasm: zq3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq3=int64#10
- # asm 2: and <mulredmask=%rdx,<zq3=%r12
- and %rdx,%r12
- # qhasm: zq3 += mulr21
- # asm 1: add <mulr21=int64#9,<zq3=int64#10
- # asm 2: add <mulr21=%r11,<zq3=%r12
- add %r11,%r12
- # qhasm: mulr41 = (mulr41.zq4) << 13
- # asm 1: shld $13,<zq4=int64#12,<mulr41=int64#13
- # asm 2: shld $13,<zq4=%r14,<mulr41=%r15
- shld $13,%r14,%r15
- # qhasm: zq4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq4=int64#12
- # asm 2: and <mulredmask=%rdx,<zq4=%r14
- and %rdx,%r14
- # qhasm: zq4 += mulr31
- # asm 1: add <mulr31=int64#11,<zq4=int64#12
- # asm 2: add <mulr31=%r13,<zq4=%r14
- add %r13,%r14
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
- # asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: zq0 += mulr41
- # asm 1: add <mulr41=int64#4,<zq0=int64#2
- # asm 2: add <mulr41=%rcx,<zq0=%rsi
- add %rcx,%rsi
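- # Reduction, phase 2: a sequential carry ripple. Each limb's bits above
- # 51 are pushed into the next limb, and the carry out of limb 4 wraps
- # around to limb 0 times 19. Afterwards every limb fits in 52 bits; the
- # representation is not made canonical here (that happens only at the
- # end of the scalar multiplication).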
- # qhasm: mult = zq0
- # asm 1: mov <zq0=int64#2,>mult=int64#4
- # asm 2: mov <zq0=%rsi,>mult=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: mult += zq1
- # asm 1: add <zq1=int64#5,<mult=int64#4
- # asm 2: add <zq1=%r8,<mult=%rcx
- add %r8,%rcx
- # qhasm: zq1 = mult
- # asm 1: mov <mult=int64#4,>zq1=int64#5
- # asm 2: mov <mult=%rcx,>zq1=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zq0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq0=int64#2
- # asm 2: and <mulredmask=%rdx,<zq0=%rsi
- and %rdx,%rsi
- # qhasm: mult += zq2
- # asm 1: add <zq2=int64#8,<mult=int64#4
- # asm 2: add <zq2=%r10,<mult=%rcx
- add %r10,%rcx
- # qhasm: zq2 = mult
- # asm 1: mov <mult=int64#4,>zq2=int64#6
- # asm 2: mov <mult=%rcx,>zq2=%r9
- mov %rcx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zq1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq1=int64#5
- # asm 2: and <mulredmask=%rdx,<zq1=%r8
- and %rdx,%r8
- # qhasm: mult += zq3
- # asm 1: add <zq3=int64#10,<mult=int64#4
- # asm 2: add <zq3=%r12,<mult=%rcx
- add %r12,%rcx
- # qhasm: zq3 = mult
- # asm 1: mov <mult=int64#4,>zq3=int64#7
- # asm 2: mov <mult=%rcx,>zq3=%rax
- mov %rcx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zq2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq2=int64#6
- # asm 2: and <mulredmask=%rdx,<zq2=%r9
- and %rdx,%r9
- # qhasm: mult += zq4
- # asm 1: add <zq4=int64#12,<mult=int64#4
- # asm 2: add <zq4=%r14,<mult=%rcx
- add %r14,%rcx
- # qhasm: zq4 = mult
- # asm 1: mov <mult=int64#4,>zq4=int64#8
- # asm 2: mov <mult=%rcx,>zq4=%r10
- mov %rcx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zq3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq3=int64#7
- # asm 2: and <mulredmask=%rdx,<zq3=%rax
- and %rdx,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#4,>mult=int64#4
- # asm 2: imulq $19,<mult=%rcx,>mult=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: zq0 += mult
- # asm 1: add <mult=int64#4,<zq0=int64#2
- # asm 2: add <mult=%rcx,<zq0=%rsi
- add %rcx,%rsi
- # qhasm: zq4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zq4=int64#8
- # asm 2: and <mulredmask=%rdx,<zq4=%r10
- and %rdx,%r10
- # qhasm: *(uint64 *)(workp + 160) = zq0
- # asm 1: movq <zq0=int64#2,160(<workp=int64#1)
- # asm 2: movq <zq0=%rsi,160(<workp=%rdi)
- movq %rsi,160(%rdi)
- # qhasm: *(uint64 *)(workp + 168) = zq1
- # asm 1: movq <zq1=int64#5,168(<workp=int64#1)
- # asm 2: movq <zq1=%r8,168(<workp=%rdi)
- movq %r8,168(%rdi)
- # qhasm: *(uint64 *)(workp + 176) = zq2
- # asm 1: movq <zq2=int64#6,176(<workp=int64#1)
- # asm 2: movq <zq2=%r9,176(<workp=%rdi)
- movq %r9,176(%rdi)
- # qhasm: *(uint64 *)(workp + 184) = zq3
- # asm 1: movq <zq3=int64#7,184(<workp=int64#1)
- # asm 2: movq <zq3=%rax,184(<workp=%rdi)
- movq %rax,184(%rdi)
- # qhasm: *(uint64 *)(workp + 192) = zq4
- # asm 1: movq <zq4=int64#8,192(<workp=int64#1)
- # asm 2: movq <zq4=%r10,192(<workp=%rdi)
- movq %r10,192(%rdi)
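- # zq is written back to workp+160..192. In the usual Montgomery-ladder
- # notation (cf. RFC 7748) this multiplication was Z3 = X1*(DA-CB)^2,
- # with X1 apparently kept at workp+0..32 (inferred from the operand
- # offsets, not stated by the generated comments). The next block
- # computes X2 = t6*t7, where t6 = (X2+Z2)^2 and t7 = (X2-Z2)^2 live on
- # the stack as t60..t64 and t70..t74; mulx319_stack and mulx419_stack
- # are first refreshed to 19*t63 and 19*t64.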
- # qhasm: mulrax = t63_stack
- # asm 1: movq <t63_stack=stack64#26,>mulrax=int64#2
- # asm 2: movq <t63_stack=200(%rsp),>mulrax=%rsi
- movq 200(%rsp),%rsi
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
- imulq $19,%rsi,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t72_stack
- # asm 1: mulq <t72_stack=stack64#20
- # asm 2: mulq <t72_stack=152(%rsp)
- mulq 152(%rsp)
- # qhasm: xp0 = mulrax
- # asm 1: mov <mulrax=int64#7,>xp0=int64#2
- # asm 2: mov <mulrax=%rax,>xp0=%rsi
- mov %rax,%rsi
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
- # asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
- mov %rdx,%rcx
- # qhasm: mulrax = t64_stack
- # asm 1: movq <t64_stack=stack64#27,>mulrax=int64#3
- # asm 2: movq <t64_stack=208(%rsp),>mulrax=%rdx
- movq 208(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t71_stack
- # asm 1: mulq <t71_stack=stack64#19
- # asm 2: mulq <t71_stack=144(%rsp)
- mulq 144(%rsp)
- # qhasm: carry? xp0 += mulrax
- # asm 1: add <mulrax=int64#7,<xp0=int64#2
- # asm 2: add <mulrax=%rax,<xp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t60_stack
- # asm 1: movq <t60_stack=stack64#23,>mulrax=int64#7
- # asm 2: movq <t60_stack=176(%rsp),>mulrax=%rax
- movq 176(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t70_stack
- # asm 1: mulq <t70_stack=stack64#18
- # asm 2: mulq <t70_stack=136(%rsp)
- mulq 136(%rsp)
- # qhasm: carry? xp0 += mulrax
- # asm 1: add <mulrax=int64#7,<xp0=int64#2
- # asm 2: add <mulrax=%rax,<xp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t60_stack
- # asm 1: movq <t60_stack=stack64#23,>mulrax=int64#7
- # asm 2: movq <t60_stack=176(%rsp),>mulrax=%rax
- movq 176(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t71_stack
- # asm 1: mulq <t71_stack=stack64#19
- # asm 2: mulq <t71_stack=144(%rsp)
- mulq 144(%rsp)
- # qhasm: xp1 = mulrax
- # asm 1: mov <mulrax=int64#7,>xp1=int64#5
- # asm 2: mov <mulrax=%rax,>xp1=%r8
- mov %rax,%r8
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r9
- mov %rdx,%r9
- # qhasm: mulrax = t60_stack
- # asm 1: movq <t60_stack=stack64#23,>mulrax=int64#7
- # asm 2: movq <t60_stack=176(%rsp),>mulrax=%rax
- movq 176(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t72_stack
- # asm 1: mulq <t72_stack=stack64#20
- # asm 2: mulq <t72_stack=152(%rsp)
- mulq 152(%rsp)
- # qhasm: xp2 = mulrax
- # asm 1: mov <mulrax=int64#7,>xp2=int64#8
- # asm 2: mov <mulrax=%rax,>xp2=%r10
- mov %rax,%r10
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r11
- mov %rdx,%r11
- # qhasm: mulrax = t60_stack
- # asm 1: movq <t60_stack=stack64#23,>mulrax=int64#7
- # asm 2: movq <t60_stack=176(%rsp),>mulrax=%rax
- movq 176(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t73_stack
- # asm 1: mulq <t73_stack=stack64#21
- # asm 2: mulq <t73_stack=160(%rsp)
- mulq 160(%rsp)
- # qhasm: xp3 = mulrax
- # asm 1: mov <mulrax=int64#7,>xp3=int64#10
- # asm 2: mov <mulrax=%rax,>xp3=%r12
- mov %rax,%r12
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r13
- mov %rdx,%r13
- # qhasm: mulrax = t60_stack
- # asm 1: movq <t60_stack=stack64#23,>mulrax=int64#7
- # asm 2: movq <t60_stack=176(%rsp),>mulrax=%rax
- movq 176(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t74_stack
- # asm 1: mulq <t74_stack=stack64#22
- # asm 2: mulq <t74_stack=168(%rsp)
- mulq 168(%rsp)
- # qhasm: xp4 = mulrax
- # asm 1: mov <mulrax=int64#7,>xp4=int64#12
- # asm 2: mov <mulrax=%rax,>xp4=%r14
- mov %rax,%r14
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
- # asm 2: mov <mulrdx=%rdx,>mulr41=%r15
- mov %rdx,%r15
- # qhasm: mulrax = t61_stack
- # asm 1: movq <t61_stack=stack64#24,>mulrax=int64#7
- # asm 2: movq <t61_stack=184(%rsp),>mulrax=%rax
- movq 184(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t70_stack
- # asm 1: mulq <t70_stack=stack64#18
- # asm 2: mulq <t70_stack=136(%rsp)
- mulq 136(%rsp)
- # qhasm: carry? xp1 += mulrax
- # asm 1: add <mulrax=int64#7,<xp1=int64#5
- # asm 2: add <mulrax=%rax,<xp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t61_stack
- # asm 1: movq <t61_stack=stack64#24,>mulrax=int64#7
- # asm 2: movq <t61_stack=184(%rsp),>mulrax=%rax
- movq 184(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t71_stack
- # asm 1: mulq <t71_stack=stack64#19
- # asm 2: mulq <t71_stack=144(%rsp)
- mulq 144(%rsp)
- # qhasm: carry? xp2 += mulrax
- # asm 1: add <mulrax=int64#7,<xp2=int64#8
- # asm 2: add <mulrax=%rax,<xp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t61_stack
- # asm 1: movq <t61_stack=stack64#24,>mulrax=int64#7
- # asm 2: movq <t61_stack=184(%rsp),>mulrax=%rax
- movq 184(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t72_stack
- # asm 1: mulq <t72_stack=stack64#20
- # asm 2: mulq <t72_stack=152(%rsp)
- mulq 152(%rsp)
- # qhasm: carry? xp3 += mulrax
- # asm 1: add <mulrax=int64#7,<xp3=int64#10
- # asm 2: add <mulrax=%rax,<xp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t61_stack
- # asm 1: movq <t61_stack=stack64#24,>mulrax=int64#7
- # asm 2: movq <t61_stack=184(%rsp),>mulrax=%rax
- movq 184(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t73_stack
- # asm 1: mulq <t73_stack=stack64#21
- # asm 2: mulq <t73_stack=160(%rsp)
- mulq 160(%rsp)
- # qhasm: carry? xp4 += mulrax
- # asm 1: add <mulrax=int64#7,<xp4=int64#12
- # asm 2: add <mulrax=%rax,<xp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t61_stack
- # asm 1: movq <t61_stack=stack64#24,>mulrax=int64#3
- # asm 2: movq <t61_stack=184(%rsp),>mulrax=%rdx
- movq 184(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t74_stack
- # asm 1: mulq <t74_stack=stack64#22
- # asm 2: mulq <t74_stack=168(%rsp)
- mulq 168(%rsp)
- # qhasm: carry? xp0 += mulrax
- # asm 1: add <mulrax=int64#7,<xp0=int64#2
- # asm 2: add <mulrax=%rax,<xp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t62_stack
- # asm 1: movq <t62_stack=stack64#25,>mulrax=int64#7
- # asm 2: movq <t62_stack=192(%rsp),>mulrax=%rax
- movq 192(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t70_stack
- # asm 1: mulq <t70_stack=stack64#18
- # asm 2: mulq <t70_stack=136(%rsp)
- mulq 136(%rsp)
- # qhasm: carry? xp2 += mulrax
- # asm 1: add <mulrax=int64#7,<xp2=int64#8
- # asm 2: add <mulrax=%rax,<xp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t62_stack
- # asm 1: movq <t62_stack=stack64#25,>mulrax=int64#7
- # asm 2: movq <t62_stack=192(%rsp),>mulrax=%rax
- movq 192(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t71_stack
- # asm 1: mulq <t71_stack=stack64#19
- # asm 2: mulq <t71_stack=144(%rsp)
- mulq 144(%rsp)
- # qhasm: carry? xp3 += mulrax
- # asm 1: add <mulrax=int64#7,<xp3=int64#10
- # asm 2: add <mulrax=%rax,<xp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t62_stack
- # asm 1: movq <t62_stack=stack64#25,>mulrax=int64#7
- # asm 2: movq <t62_stack=192(%rsp),>mulrax=%rax
- movq 192(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t72_stack
- # asm 1: mulq <t72_stack=stack64#20
- # asm 2: mulq <t72_stack=152(%rsp)
- mulq 152(%rsp)
- # qhasm: carry? xp4 += mulrax
- # asm 1: add <mulrax=int64#7,<xp4=int64#12
- # asm 2: add <mulrax=%rax,<xp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = t62_stack
- # asm 1: movq <t62_stack=stack64#25,>mulrax=int64#3
- # asm 2: movq <t62_stack=192(%rsp),>mulrax=%rdx
- movq 192(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t73_stack
- # asm 1: mulq <t73_stack=stack64#21
- # asm 2: mulq <t73_stack=160(%rsp)
- mulq 160(%rsp)
- # qhasm: carry? xp0 += mulrax
- # asm 1: add <mulrax=int64#7,<xp0=int64#2
- # asm 2: add <mulrax=%rax,<xp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = t62_stack
- # asm 1: movq <t62_stack=stack64#25,>mulrax=int64#3
- # asm 2: movq <t62_stack=192(%rsp),>mulrax=%rdx
- movq 192(%rsp),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t74_stack
- # asm 1: mulq <t74_stack=stack64#22
- # asm 2: mulq <t74_stack=168(%rsp)
- mulq 168(%rsp)
- # qhasm: carry? xp1 += mulrax
- # asm 1: add <mulrax=int64#7,<xp1=int64#5
- # asm 2: add <mulrax=%rax,<xp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = t63_stack
- # asm 1: movq <t63_stack=stack64#26,>mulrax=int64#7
- # asm 2: movq <t63_stack=200(%rsp),>mulrax=%rax
- movq 200(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t70_stack
- # asm 1: mulq <t70_stack=stack64#18
- # asm 2: mulq <t70_stack=136(%rsp)
- mulq 136(%rsp)
- # qhasm: carry? xp3 += mulrax
- # asm 1: add <mulrax=int64#7,<xp3=int64#10
- # asm 2: add <mulrax=%rax,<xp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = t63_stack
- # asm 1: movq <t63_stack=stack64#26,>mulrax=int64#7
- # asm 2: movq <t63_stack=200(%rsp),>mulrax=%rax
- movq 200(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t71_stack
- # asm 1: mulq <t71_stack=stack64#19
- # asm 2: mulq <t71_stack=144(%rsp)
- mulq 144(%rsp)
- # qhasm: carry? xp4 += mulrax
- # asm 1: add <mulrax=int64#7,<xp4=int64#12
- # asm 2: add <mulrax=%rax,<xp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t73_stack
- # asm 1: mulq <t73_stack=stack64#21
- # asm 2: mulq <t73_stack=160(%rsp)
- mulq 160(%rsp)
- # qhasm: carry? xp1 += mulrax
- # asm 1: add <mulrax=int64#7,<xp1=int64#5
- # asm 2: add <mulrax=%rax,<xp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t74_stack
- # asm 1: mulq <t74_stack=stack64#22
- # asm 2: mulq <t74_stack=168(%rsp)
- mulq 168(%rsp)
- # qhasm: carry? xp2 += mulrax
- # asm 1: add <mulrax=int64#7,<xp2=int64#8
- # asm 2: add <mulrax=%rax,<xp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = t64_stack
- # asm 1: movq <t64_stack=stack64#27,>mulrax=int64#7
- # asm 2: movq <t64_stack=208(%rsp),>mulrax=%rax
- movq 208(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t70_stack
- # asm 1: mulq <t70_stack=stack64#18
- # asm 2: mulq <t70_stack=136(%rsp)
- mulq 136(%rsp)
- # qhasm: carry? xp4 += mulrax
- # asm 1: add <mulrax=int64#7,<xp4=int64#12
- # asm 2: add <mulrax=%rax,<xp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t72_stack
- # asm 1: mulq <t72_stack=stack64#20
- # asm 2: mulq <t72_stack=152(%rsp)
- mulq 152(%rsp)
- # qhasm: carry? xp1 += mulrax
- # asm 1: add <mulrax=int64#7,<xp1=int64#5
- # asm 2: add <mulrax=%rax,<xp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t73_stack
- # asm 1: mulq <t73_stack=stack64#21
- # asm 2: mulq <t73_stack=160(%rsp)
- mulq 160(%rsp)
- # qhasm: carry? xp2 += mulrax
- # asm 1: add <mulrax=int64#7,<xp2=int64#8
- # asm 2: add <mulrax=%rax,<xp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t74_stack
- # asm 1: mulq <t74_stack=stack64#22
- # asm 2: mulq <t74_stack=168(%rsp)
- mulq 168(%rsp)
- # qhasm: carry? xp3 += mulrax
- # asm 1: add <mulrax=int64#7,<xp3=int64#10
- # asm 2: add <mulrax=%rax,<xp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
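- # End of the partial products for X2 = t6*t7; the same two-phase
- # reduction as above follows (shld fold, REDMASK51 mask, carry ripple),
- # and the result lands in workp+40..72.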
- # qhasm: mulredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: mulr01 = (mulr01.xp0) << 13
- # asm 1: shld $13,<xp0=int64#2,<mulr01=int64#4
- # asm 2: shld $13,<xp0=%rsi,<mulr01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: xp0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp0=int64#2
- # asm 2: and <mulredmask=%rdx,<xp0=%rsi
- and %rdx,%rsi
- # qhasm: mulr11 = (mulr11.xp1) << 13
- # asm 1: shld $13,<xp1=int64#5,<mulr11=int64#6
- # asm 2: shld $13,<xp1=%r8,<mulr11=%r9
- shld $13,%r8,%r9
- # qhasm: xp1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp1=int64#5
- # asm 2: and <mulredmask=%rdx,<xp1=%r8
- and %rdx,%r8
- # qhasm: xp1 += mulr01
- # asm 1: add <mulr01=int64#4,<xp1=int64#5
- # asm 2: add <mulr01=%rcx,<xp1=%r8
- add %rcx,%r8
- # qhasm: mulr21 = (mulr21.xp2) << 13
- # asm 1: shld $13,<xp2=int64#8,<mulr21=int64#9
- # asm 2: shld $13,<xp2=%r10,<mulr21=%r11
- shld $13,%r10,%r11
- # qhasm: xp2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp2=int64#8
- # asm 2: and <mulredmask=%rdx,<xp2=%r10
- and %rdx,%r10
- # qhasm: xp2 += mulr11
- # asm 1: add <mulr11=int64#6,<xp2=int64#8
- # asm 2: add <mulr11=%r9,<xp2=%r10
- add %r9,%r10
- # qhasm: mulr31 = (mulr31.xp3) << 13
- # asm 1: shld $13,<xp3=int64#10,<mulr31=int64#11
- # asm 2: shld $13,<xp3=%r12,<mulr31=%r13
- shld $13,%r12,%r13
- # qhasm: xp3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp3=int64#10
- # asm 2: and <mulredmask=%rdx,<xp3=%r12
- and %rdx,%r12
- # qhasm: xp3 += mulr21
- # asm 1: add <mulr21=int64#9,<xp3=int64#10
- # asm 2: add <mulr21=%r11,<xp3=%r12
- add %r11,%r12
- # qhasm: mulr41 = (mulr41.xp4) << 13
- # asm 1: shld $13,<xp4=int64#12,<mulr41=int64#13
- # asm 2: shld $13,<xp4=%r14,<mulr41=%r15
- shld $13,%r14,%r15
- # qhasm: xp4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp4=int64#12
- # asm 2: and <mulredmask=%rdx,<xp4=%r14
- and %rdx,%r14
- # qhasm: xp4 += mulr31
- # asm 1: add <mulr31=int64#11,<xp4=int64#12
- # asm 2: add <mulr31=%r13,<xp4=%r14
- add %r13,%r14
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
- # asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: xp0 += mulr41
- # asm 1: add <mulr41=int64#4,<xp0=int64#2
- # asm 2: add <mulr41=%rcx,<xp0=%rsi
- add %rcx,%rsi
- # qhasm: mult = xp0
- # asm 1: mov <xp0=int64#2,>mult=int64#4
- # asm 2: mov <xp0=%rsi,>mult=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: mult += xp1
- # asm 1: add <xp1=int64#5,<mult=int64#4
- # asm 2: add <xp1=%r8,<mult=%rcx
- add %r8,%rcx
- # qhasm: xp1 = mult
- # asm 1: mov <mult=int64#4,>xp1=int64#5
- # asm 2: mov <mult=%rcx,>xp1=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: xp0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp0=int64#2
- # asm 2: and <mulredmask=%rdx,<xp0=%rsi
- and %rdx,%rsi
- # qhasm: mult += xp2
- # asm 1: add <xp2=int64#8,<mult=int64#4
- # asm 2: add <xp2=%r10,<mult=%rcx
- add %r10,%rcx
- # qhasm: xp2 = mult
- # asm 1: mov <mult=int64#4,>xp2=int64#6
- # asm 2: mov <mult=%rcx,>xp2=%r9
- mov %rcx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: xp1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp1=int64#5
- # asm 2: and <mulredmask=%rdx,<xp1=%r8
- and %rdx,%r8
- # qhasm: mult += xp3
- # asm 1: add <xp3=int64#10,<mult=int64#4
- # asm 2: add <xp3=%r12,<mult=%rcx
- add %r12,%rcx
- # qhasm: xp3 = mult
- # asm 1: mov <mult=int64#4,>xp3=int64#7
- # asm 2: mov <mult=%rcx,>xp3=%rax
- mov %rcx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: xp2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp2=int64#6
- # asm 2: and <mulredmask=%rdx,<xp2=%r9
- and %rdx,%r9
- # qhasm: mult += xp4
- # asm 1: add <xp4=int64#12,<mult=int64#4
- # asm 2: add <xp4=%r14,<mult=%rcx
- add %r14,%rcx
- # qhasm: xp4 = mult
- # asm 1: mov <mult=int64#4,>xp4=int64#8
- # asm 2: mov <mult=%rcx,>xp4=%r10
- mov %rcx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: xp3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp3=int64#7
- # asm 2: and <mulredmask=%rdx,<xp3=%rax
- and %rdx,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#4,>mult=int64#4
- # asm 2: imulq $19,<mult=%rcx,>mult=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: xp0 += mult
- # asm 1: add <mult=int64#4,<xp0=int64#2
- # asm 2: add <mult=%rcx,<xp0=%rsi
- add %rcx,%rsi
- # qhasm: xp4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<xp4=int64#8
- # asm 2: and <mulredmask=%rdx,<xp4=%r10
- and %rdx,%r10
- # qhasm: *(uint64 *)(workp + 40) = xp0
- # asm 1: movq <xp0=int64#2,40(<workp=int64#1)
- # asm 2: movq <xp0=%rsi,40(<workp=%rdi)
- movq %rsi,40(%rdi)
- # qhasm: *(uint64 *)(workp + 48) = xp1
- # asm 1: movq <xp1=int64#5,48(<workp=int64#1)
- # asm 2: movq <xp1=%r8,48(<workp=%rdi)
- movq %r8,48(%rdi)
- # qhasm: *(uint64 *)(workp + 56) = xp2
- # asm 1: movq <xp2=int64#6,56(<workp=int64#1)
- # asm 2: movq <xp2=%r9,56(<workp=%rdi)
- movq %r9,56(%rdi)
- # qhasm: *(uint64 *)(workp + 64) = xp3
- # asm 1: movq <xp3=int64#7,64(<workp=int64#1)
- # asm 2: movq <xp3=%rax,64(<workp=%rdi)
- movq %rax,64(%rdi)
- # qhasm: *(uint64 *)(workp + 72) = xp4
- # asm 1: movq <xp4=int64#8,72(<workp=int64#1)
- # asm 2: movq <xp4=%r10,72(<workp=%rdi)
- movq %r10,72(%rdi)
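- # X2 is stored to workp+40..72. Next, Z2 = t5*(t7 + 121666*t5) is
- # assembled, where t5 = t6 - t7. The constant
- # crypto_scalarmult_curve25519_amd64_51_121666_213 is 121666*2^13 (as
- # the name suggests): for a limb t, the 128-bit product t*(121666*2^13)
- # splits at the 64-bit boundary so that rax>>13 is the low 51 bits of
- # 121666*t while rdx is already (121666*t)>>51, the carry into the next
- # limb. This avoids a separate shld/mask pass; the limb-4 carry is
- # folded into limb 0 with the usual *19, then t70..t74 are added.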
- # qhasm: mul121666rax = t50_stack
- # asm 1: movq <t50_stack=stack64#28,>mul121666rax=int64#7
- # asm 2: movq <t50_stack=216(%rsp),>mul121666rax=%rax
- movq 216(%rsp),%rax
- # qhasm: (uint128) mul121666rdx mul121666rax = mul121666rax * *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 1: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 2: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # qhasm: (uint64) mul121666rax >>= 13
- # asm 1: shr $13,<mul121666rax=int64#7
- # asm 2: shr $13,<mul121666rax=%rax
- shr $13,%rax
- # qhasm: zp0 = mul121666rax
- # asm 1: mov <mul121666rax=int64#7,>zp0=int64#2
- # asm 2: mov <mul121666rax=%rax,>zp0=%rsi
- mov %rax,%rsi
- # qhasm: zp1 = mul121666rdx
- # asm 1: mov <mul121666rdx=int64#3,>zp1=int64#4
- # asm 2: mov <mul121666rdx=%rdx,>zp1=%rcx
- mov %rdx,%rcx
- # qhasm: mul121666rax = t51_stack
- # asm 1: movq <t51_stack=stack64#29,>mul121666rax=int64#7
- # asm 2: movq <t51_stack=224(%rsp),>mul121666rax=%rax
- movq 224(%rsp),%rax
- # qhasm: (uint128) mul121666rdx mul121666rax = mul121666rax * *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 1: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 2: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # qhasm: (uint64) mul121666rax >>= 13
- # asm 1: shr $13,<mul121666rax=int64#7
- # asm 2: shr $13,<mul121666rax=%rax
- shr $13,%rax
- # qhasm: zp1 += mul121666rax
- # asm 1: add <mul121666rax=int64#7,<zp1=int64#4
- # asm 2: add <mul121666rax=%rax,<zp1=%rcx
- add %rax,%rcx
- # qhasm: zp2 = mul121666rdx
- # asm 1: mov <mul121666rdx=int64#3,>zp2=int64#5
- # asm 2: mov <mul121666rdx=%rdx,>zp2=%r8
- mov %rdx,%r8
- # qhasm: mul121666rax = t52_stack
- # asm 1: movq <t52_stack=stack64#30,>mul121666rax=int64#7
- # asm 2: movq <t52_stack=232(%rsp),>mul121666rax=%rax
- movq 232(%rsp),%rax
- # qhasm: (uint128) mul121666rdx mul121666rax = mul121666rax * *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 1: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 2: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # qhasm: (uint64) mul121666rax >>= 13
- # asm 1: shr $13,<mul121666rax=int64#7
- # asm 2: shr $13,<mul121666rax=%rax
- shr $13,%rax
- # qhasm: zp2 += mul121666rax
- # asm 1: add <mul121666rax=int64#7,<zp2=int64#5
- # asm 2: add <mul121666rax=%rax,<zp2=%r8
- add %rax,%r8
- # qhasm: zp3 = mul121666rdx
- # asm 1: mov <mul121666rdx=int64#3,>zp3=int64#6
- # asm 2: mov <mul121666rdx=%rdx,>zp3=%r9
- mov %rdx,%r9
- # qhasm: mul121666rax = t53_stack
- # asm 1: movq <t53_stack=stack64#31,>mul121666rax=int64#7
- # asm 2: movq <t53_stack=240(%rsp),>mul121666rax=%rax
- movq 240(%rsp),%rax
- # qhasm: (uint128) mul121666rdx mul121666rax = mul121666rax * *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 1: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 2: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # qhasm: (uint64) mul121666rax >>= 13
- # asm 1: shr $13,<mul121666rax=int64#7
- # asm 2: shr $13,<mul121666rax=%rax
- shr $13,%rax
- # qhasm: zp3 += mul121666rax
- # asm 1: add <mul121666rax=int64#7,<zp3=int64#6
- # asm 2: add <mul121666rax=%rax,<zp3=%r9
- add %rax,%r9
- # qhasm: zp4 = mul121666rdx
- # asm 1: mov <mul121666rdx=int64#3,>zp4=int64#8
- # asm 2: mov <mul121666rdx=%rdx,>zp4=%r10
- mov %rdx,%r10
- # qhasm: mul121666rax = t54_stack
- # asm 1: movq <t54_stack=stack64#32,>mul121666rax=int64#7
- # asm 2: movq <t54_stack=248(%rsp),>mul121666rax=%rax
- movq 248(%rsp),%rax
- # qhasm: (uint128) mul121666rdx mul121666rax = mul121666rax * *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 1: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # asm 2: mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- mulq crypto_scalarmult_curve25519_amd64_51_121666_213
- # qhasm: (uint64) mul121666rax >>= 13
- # asm 1: shr $13,<mul121666rax=int64#7
- # asm 2: shr $13,<mul121666rax=%rax
- shr $13,%rax
- # qhasm: zp4 += mul121666rax
- # asm 1: add <mul121666rax=int64#7,<zp4=int64#8
- # asm 2: add <mul121666rax=%rax,<zp4=%r10
- add %rax,%r10
- # qhasm: mul121666rdx *= 19
- # asm 1: imulq $19,<mul121666rdx=int64#3,>mul121666rdx=int64#3
- # asm 2: imulq $19,<mul121666rdx=%rdx,>mul121666rdx=%rdx
- imulq $19,%rdx,%rdx
- # qhasm: zp0 += mul121666rdx
- # asm 1: add <mul121666rdx=int64#3,<zp0=int64#2
- # asm 2: add <mul121666rdx=%rdx,<zp0=%rsi
- add %rdx,%rsi
- # qhasm: zp0 += t70_stack
- # asm 1: addq <t70_stack=stack64#18,<zp0=int64#2
- # asm 2: addq <t70_stack=136(%rsp),<zp0=%rsi
- addq 136(%rsp),%rsi
- # qhasm: zp1 += t71_stack
- # asm 1: addq <t71_stack=stack64#19,<zp1=int64#4
- # asm 2: addq <t71_stack=144(%rsp),<zp1=%rcx
- addq 144(%rsp),%rcx
- # qhasm: zp2 += t72_stack
- # asm 1: addq <t72_stack=stack64#20,<zp2=int64#5
- # asm 2: addq <t72_stack=152(%rsp),<zp2=%r8
- addq 152(%rsp),%r8
- # qhasm: zp3 += t73_stack
- # asm 1: addq <t73_stack=stack64#21,<zp3=int64#6
- # asm 2: addq <t73_stack=160(%rsp),<zp3=%r9
- addq 160(%rsp),%r9
- # qhasm: zp4 += t74_stack
- # asm 1: addq <t74_stack=stack64#22,<zp4=int64#8
- # asm 2: addq <t74_stack=168(%rsp),<zp4=%r10
- addq 168(%rsp),%r10
- # qhasm: *(uint64 *)(workp + 80) = zp0
- # asm 1: movq <zp0=int64#2,80(<workp=int64#1)
- # asm 2: movq <zp0=%rsi,80(<workp=%rdi)
- movq %rsi,80(%rdi)
- # qhasm: *(uint64 *)(workp + 88) = zp1
- # asm 1: movq <zp1=int64#4,88(<workp=int64#1)
- # asm 2: movq <zp1=%rcx,88(<workp=%rdi)
- movq %rcx,88(%rdi)
- # qhasm: *(uint64 *)(workp + 96) = zp2
- # asm 1: movq <zp2=int64#5,96(<workp=int64#1)
- # asm 2: movq <zp2=%r8,96(<workp=%rdi)
- movq %r8,96(%rdi)
- # qhasm: *(uint64 *)(workp + 104) = zp3
- # asm 1: movq <zp3=int64#6,104(<workp=int64#1)
- # asm 2: movq <zp3=%r9,104(<workp=%rdi)
- movq %r9,104(%rdi)
- # qhasm: *(uint64 *)(workp + 112) = zp4
- # asm 1: movq <zp4=int64#8,112(<workp=int64#1)
- # asm 2: movq <zp4=%r10,112(<workp=%rdi)
- movq %r10,112(%rdi)
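- # workp+80..112 now hold t7 + 121666*t5, with limbs small enough to
- # feed straight into one more multiplication. The final product below
- # is Z2 = t5 * (t7 + 121666*t5), reading one operand back from
- # workp+80..112 and t50..t54 from the stack, again with the limb-3 and
- # limb-4 cross terms pre-scaled by 19 via mulx319_stack/mulx419_stack.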
- # qhasm: mulrax = *(uint64 *)(workp + 104)
- # asm 1: movq 104(<workp=int64#1),>mulrax=int64#2
- # asm 2: movq 104(<workp=%rdi),>mulrax=%rsi
- movq 104(%rdi),%rsi
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#2,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rsi,>mulrax=%rax
- imulq $19,%rsi,%rax
- # qhasm: mulx319_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
- # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
- movq %rax,56(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t52_stack
- # asm 1: mulq <t52_stack=stack64#30
- # asm 2: mulq <t52_stack=232(%rsp)
- mulq 232(%rsp)
- # qhasm: zp0 = mulrax
- # asm 1: mov <mulrax=int64#7,>zp0=int64#2
- # asm 2: mov <mulrax=%rax,>zp0=%rsi
- mov %rax,%rsi
- # qhasm: mulr01 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr01=int64#4
- # asm 2: mov <mulrdx=%rdx,>mulr01=%rcx
- mov %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 112)
- # asm 1: movq 112(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 112(<workp=%rdi),>mulrax=%rdx
- movq 112(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: mulx419_stack = mulrax
- # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
- # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
- movq %rax,64(%rsp)
- # qhasm: (uint128) mulrdx mulrax = mulrax * t51_stack
- # asm 1: mulq <t51_stack=stack64#29
- # asm 2: mulq <t51_stack=224(%rsp)
- mulq 224(%rsp)
- # qhasm: carry? zp0 += mulrax
- # asm 1: add <mulrax=int64#7,<zp0=int64#2
- # asm 2: add <mulrax=%rax,<zp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 80)
- # asm 1: movq 80(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 80(<workp=%rdi),>mulrax=%rax
- movq 80(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t50_stack
- # asm 1: mulq <t50_stack=stack64#28
- # asm 2: mulq <t50_stack=216(%rsp)
- mulq 216(%rsp)
- # qhasm: carry? zp0 += mulrax
- # asm 1: add <mulrax=int64#7,<zp0=int64#2
- # asm 2: add <mulrax=%rax,<zp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 80)
- # asm 1: movq 80(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 80(<workp=%rdi),>mulrax=%rax
- movq 80(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t51_stack
- # asm 1: mulq <t51_stack=stack64#29
- # asm 2: mulq <t51_stack=224(%rsp)
- mulq 224(%rsp)
- # qhasm: zp1 = mulrax
- # asm 1: mov <mulrax=int64#7,>zp1=int64#5
- # asm 2: mov <mulrax=%rax,>zp1=%r8
- mov %rax,%r8
- # qhasm: mulr11 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr11=int64#6
- # asm 2: mov <mulrdx=%rdx,>mulr11=%r9
- mov %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 80)
- # asm 1: movq 80(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 80(<workp=%rdi),>mulrax=%rax
- movq 80(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t52_stack
- # asm 1: mulq <t52_stack=stack64#30
- # asm 2: mulq <t52_stack=232(%rsp)
- mulq 232(%rsp)
- # qhasm: zp2 = mulrax
- # asm 1: mov <mulrax=int64#7,>zp2=int64#8
- # asm 2: mov <mulrax=%rax,>zp2=%r10
- mov %rax,%r10
- # qhasm: mulr21 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr21=int64#9
- # asm 2: mov <mulrdx=%rdx,>mulr21=%r11
- mov %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 80)
- # asm 1: movq 80(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 80(<workp=%rdi),>mulrax=%rax
- movq 80(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t53_stack
- # asm 1: mulq <t53_stack=stack64#31
- # asm 2: mulq <t53_stack=240(%rsp)
- mulq 240(%rsp)
- # qhasm: zp3 = mulrax
- # asm 1: mov <mulrax=int64#7,>zp3=int64#10
- # asm 2: mov <mulrax=%rax,>zp3=%r12
- mov %rax,%r12
- # qhasm: mulr31 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr31=int64#11
- # asm 2: mov <mulrdx=%rdx,>mulr31=%r13
- mov %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 80)
- # asm 1: movq 80(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 80(<workp=%rdi),>mulrax=%rax
- movq 80(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t54_stack
- # asm 1: mulq <t54_stack=stack64#32
- # asm 2: mulq <t54_stack=248(%rsp)
- mulq 248(%rsp)
- # qhasm: zp4 = mulrax
- # asm 1: mov <mulrax=int64#7,>zp4=int64#12
- # asm 2: mov <mulrax=%rax,>zp4=%r14
- mov %rax,%r14
- # qhasm: mulr41 = mulrdx
- # asm 1: mov <mulrdx=int64#3,>mulr41=int64#13
- # asm 2: mov <mulrdx=%rdx,>mulr41=%r15
- mov %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 88)
- # asm 1: movq 88(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 88(<workp=%rdi),>mulrax=%rax
- movq 88(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t50_stack
- # asm 1: mulq <t50_stack=stack64#28
- # asm 2: mulq <t50_stack=216(%rsp)
- mulq 216(%rsp)
- # qhasm: carry? zp1 += mulrax
- # asm 1: add <mulrax=int64#7,<zp1=int64#5
- # asm 2: add <mulrax=%rax,<zp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 88)
- # asm 1: movq 88(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 88(<workp=%rdi),>mulrax=%rax
- movq 88(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t51_stack
- # asm 1: mulq <t51_stack=stack64#29
- # asm 2: mulq <t51_stack=224(%rsp)
- mulq 224(%rsp)
- # qhasm: carry? zp2 += mulrax
- # asm 1: add <mulrax=int64#7,<zp2=int64#8
- # asm 2: add <mulrax=%rax,<zp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 88)
- # asm 1: movq 88(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 88(<workp=%rdi),>mulrax=%rax
- movq 88(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t52_stack
- # asm 1: mulq <t52_stack=stack64#30
- # asm 2: mulq <t52_stack=232(%rsp)
- mulq 232(%rsp)
- # qhasm: carry? zp3 += mulrax
- # asm 1: add <mulrax=int64#7,<zp3=int64#10
- # asm 2: add <mulrax=%rax,<zp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 88)
- # asm 1: movq 88(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 88(<workp=%rdi),>mulrax=%rax
- movq 88(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t53_stack
- # asm 1: mulq <t53_stack=stack64#31
- # asm 2: mulq <t53_stack=240(%rsp)
- mulq 240(%rsp)
- # qhasm: carry? zp4 += mulrax
- # asm 1: add <mulrax=int64#7,<zp4=int64#12
- # asm 2: add <mulrax=%rax,<zp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 88)
- # asm 1: movq 88(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 88(<workp=%rdi),>mulrax=%rdx
- movq 88(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t54_stack
- # asm 1: mulq <t54_stack=stack64#32
- # asm 2: mulq <t54_stack=248(%rsp)
- mulq 248(%rsp)
- # qhasm: carry? zp0 += mulrax
- # asm 1: add <mulrax=int64#7,<zp0=int64#2
- # asm 2: add <mulrax=%rax,<zp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 96)
- # asm 1: movq 96(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 96(<workp=%rdi),>mulrax=%rax
- movq 96(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t50_stack
- # asm 1: mulq <t50_stack=stack64#28
- # asm 2: mulq <t50_stack=216(%rsp)
- mulq 216(%rsp)
- # qhasm: carry? zp2 += mulrax
- # asm 1: add <mulrax=int64#7,<zp2=int64#8
- # asm 2: add <mulrax=%rax,<zp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 96)
- # asm 1: movq 96(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 96(<workp=%rdi),>mulrax=%rax
- movq 96(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t51_stack
- # asm 1: mulq <t51_stack=stack64#29
- # asm 2: mulq <t51_stack=224(%rsp)
- mulq 224(%rsp)
- # qhasm: carry? zp3 += mulrax
- # asm 1: add <mulrax=int64#7,<zp3=int64#10
- # asm 2: add <mulrax=%rax,<zp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 96)
- # asm 1: movq 96(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 96(<workp=%rdi),>mulrax=%rax
- movq 96(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t52_stack
- # asm 1: mulq <t52_stack=stack64#30
- # asm 2: mulq <t52_stack=232(%rsp)
- mulq 232(%rsp)
- # qhasm: carry? zp4 += mulrax
- # asm 1: add <mulrax=int64#7,<zp4=int64#12
- # asm 2: add <mulrax=%rax,<zp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = *(uint64 *)(workp + 96)
- # asm 1: movq 96(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 96(<workp=%rdi),>mulrax=%rdx
- movq 96(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t53_stack
- # asm 1: mulq <t53_stack=stack64#31
- # asm 2: mulq <t53_stack=240(%rsp)
- mulq 240(%rsp)
- # qhasm: carry? zp0 += mulrax
- # asm 1: add <mulrax=int64#7,<zp0=int64#2
- # asm 2: add <mulrax=%rax,<zp0=%rsi
- add %rax,%rsi
- # qhasm: mulr01 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
- # asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
- adc %rdx,%rcx
- # qhasm: mulrax = *(uint64 *)(workp + 96)
- # asm 1: movq 96(<workp=int64#1),>mulrax=int64#3
- # asm 2: movq 96(<workp=%rdi),>mulrax=%rdx
- movq 96(%rdi),%rdx
- # qhasm: mulrax *= 19
- # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
- # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
- imulq $19,%rdx,%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t54_stack
- # asm 1: mulq <t54_stack=stack64#32
- # asm 2: mulq <t54_stack=248(%rsp)
- mulq 248(%rsp)
- # qhasm: carry? zp1 += mulrax
- # asm 1: add <mulrax=int64#7,<zp1=int64#5
- # asm 2: add <mulrax=%rax,<zp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = *(uint64 *)(workp + 104)
- # asm 1: movq 104(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 104(<workp=%rdi),>mulrax=%rax
- movq 104(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t50_stack
- # asm 1: mulq <t50_stack=stack64#28
- # asm 2: mulq <t50_stack=216(%rsp)
- mulq 216(%rsp)
- # qhasm: carry? zp3 += mulrax
- # asm 1: add <mulrax=int64#7,<zp3=int64#10
- # asm 2: add <mulrax=%rax,<zp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
- # qhasm: mulrax = *(uint64 *)(workp + 104)
- # asm 1: movq 104(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 104(<workp=%rdi),>mulrax=%rax
- movq 104(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t51_stack
- # asm 1: mulq <t51_stack=stack64#29
- # asm 2: mulq <t51_stack=224(%rsp)
- mulq 224(%rsp)
- # qhasm: carry? zp4 += mulrax
- # asm 1: add <mulrax=int64#7,<zp4=int64#12
- # asm 2: add <mulrax=%rax,<zp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t53_stack
- # asm 1: mulq <t53_stack=stack64#31
- # asm 2: mulq <t53_stack=240(%rsp)
- mulq 240(%rsp)
- # qhasm: carry? zp1 += mulrax
- # asm 1: add <mulrax=int64#7,<zp1=int64#5
- # asm 2: add <mulrax=%rax,<zp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx319_stack
- # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
- # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
- movq 56(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t54_stack
- # asm 1: mulq <t54_stack=stack64#32
- # asm 2: mulq <t54_stack=248(%rsp)
- mulq 248(%rsp)
- # qhasm: carry? zp2 += mulrax
- # asm 1: add <mulrax=int64#7,<zp2=int64#8
- # asm 2: add <mulrax=%rax,<zp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = *(uint64 *)(workp + 112)
- # asm 1: movq 112(<workp=int64#1),>mulrax=int64#7
- # asm 2: movq 112(<workp=%rdi),>mulrax=%rax
- movq 112(%rdi),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t50_stack
- # asm 1: mulq <t50_stack=stack64#28
- # asm 2: mulq <t50_stack=216(%rsp)
- mulq 216(%rsp)
- # qhasm: carry? zp4 += mulrax
- # asm 1: add <mulrax=int64#7,<zp4=int64#12
- # asm 2: add <mulrax=%rax,<zp4=%r14
- add %rax,%r14
- # qhasm: mulr41 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
- # asm 2: adc <mulrdx=%rdx,<mulr41=%r15
- adc %rdx,%r15
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t52_stack
- # asm 1: mulq <t52_stack=stack64#30
- # asm 2: mulq <t52_stack=232(%rsp)
- mulq 232(%rsp)
- # qhasm: carry? zp1 += mulrax
- # asm 1: add <mulrax=int64#7,<zp1=int64#5
- # asm 2: add <mulrax=%rax,<zp1=%r8
- add %rax,%r8
- # qhasm: mulr11 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
- # asm 2: adc <mulrdx=%rdx,<mulr11=%r9
- adc %rdx,%r9
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t53_stack
- # asm 1: mulq <t53_stack=stack64#31
- # asm 2: mulq <t53_stack=240(%rsp)
- mulq 240(%rsp)
- # qhasm: carry? zp2 += mulrax
- # asm 1: add <mulrax=int64#7,<zp2=int64#8
- # asm 2: add <mulrax=%rax,<zp2=%r10
- add %rax,%r10
- # qhasm: mulr21 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
- # asm 2: adc <mulrdx=%rdx,<mulr21=%r11
- adc %rdx,%r11
- # qhasm: mulrax = mulx419_stack
- # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
- # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
- movq 64(%rsp),%rax
- # qhasm: (uint128) mulrdx mulrax = mulrax * t54_stack
- # asm 1: mulq <t54_stack=stack64#32
- # asm 2: mulq <t54_stack=248(%rsp)
- mulq 248(%rsp)
- # qhasm: carry? zp3 += mulrax
- # asm 1: add <mulrax=int64#7,<zp3=int64#10
- # asm 2: add <mulrax=%rax,<zp3=%r12
- add %rax,%r12
- # qhasm: mulr31 += mulrdx + carry
- # asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
- # asm 2: adc <mulrdx=%rdx,<mulr31=%r13
- adc %rdx,%r13
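- # Partial products complete; the same reduction (shld fold, REDMASK51
- # mask, carry ripple) now brings Z2 back to five sub-2^52 limbs before
- # it is stored back.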
- # qhasm: mulredmask = *(uint64 *) &crypto_scalarmult_curve25519_amd64_51_REDMASK51
- # asm 1: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=int64#3
- # asm 2: movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,>mulredmask=%rdx
- movq crypto_scalarmult_curve25519_amd64_51_REDMASK51,%rdx
- # qhasm: mulr01 = (mulr01.zp0) << 13
- # asm 1: shld $13,<zp0=int64#2,<mulr01=int64#4
- # asm 2: shld $13,<zp0=%rsi,<mulr01=%rcx
- shld $13,%rsi,%rcx
- # qhasm: zp0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp0=int64#2
- # asm 2: and <mulredmask=%rdx,<zp0=%rsi
- and %rdx,%rsi
- # qhasm: mulr11 = (mulr11.zp1) << 13
- # asm 1: shld $13,<zp1=int64#5,<mulr11=int64#6
- # asm 2: shld $13,<zp1=%r8,<mulr11=%r9
- shld $13,%r8,%r9
- # qhasm: zp1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp1=int64#5
- # asm 2: and <mulredmask=%rdx,<zp1=%r8
- and %rdx,%r8
- # qhasm: zp1 += mulr01
- # asm 1: add <mulr01=int64#4,<zp1=int64#5
- # asm 2: add <mulr01=%rcx,<zp1=%r8
- add %rcx,%r8
- # qhasm: mulr21 = (mulr21.zp2) << 13
- # asm 1: shld $13,<zp2=int64#8,<mulr21=int64#9
- # asm 2: shld $13,<zp2=%r10,<mulr21=%r11
- shld $13,%r10,%r11
- # qhasm: zp2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp2=int64#8
- # asm 2: and <mulredmask=%rdx,<zp2=%r10
- and %rdx,%r10
- # qhasm: zp2 += mulr11
- # asm 1: add <mulr11=int64#6,<zp2=int64#8
- # asm 2: add <mulr11=%r9,<zp2=%r10
- add %r9,%r10
- # qhasm: mulr31 = (mulr31.zp3) << 13
- # asm 1: shld $13,<zp3=int64#10,<mulr31=int64#11
- # asm 2: shld $13,<zp3=%r12,<mulr31=%r13
- shld $13,%r12,%r13
- # qhasm: zp3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp3=int64#10
- # asm 2: and <mulredmask=%rdx,<zp3=%r12
- and %rdx,%r12
- # qhasm: zp3 += mulr21
- # asm 1: add <mulr21=int64#9,<zp3=int64#10
- # asm 2: add <mulr21=%r11,<zp3=%r12
- add %r11,%r12
- # qhasm: mulr41 = (mulr41.zp4) << 13
- # asm 1: shld $13,<zp4=int64#12,<mulr41=int64#13
- # asm 2: shld $13,<zp4=%r14,<mulr41=%r15
- shld $13,%r14,%r15
- # qhasm: zp4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp4=int64#12
- # asm 2: and <mulredmask=%rdx,<zp4=%r14
- and %rdx,%r14
- # qhasm: zp4 += mulr31
- # asm 1: add <mulr31=int64#11,<zp4=int64#12
- # asm 2: add <mulr31=%r13,<zp4=%r14
- add %r13,%r14
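- # Each shld $13 above computes (mulrK1:zpK) >> 51, i.e. the bits of column K
- # above its 51-bit limb (the column sums are small enough for this spill to
- # fit in 64 bits); the spill is then added into the next, already-masked limb.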
- # qhasm: mulr41 = mulr41 * 19
- # asm 1: imulq $19,<mulr41=int64#13,>mulr41=int64#4
- # asm 2: imulq $19,<mulr41=%r15,>mulr41=%rcx
- imulq $19,%r15,%rcx
- # qhasm: zp0 += mulr41
- # asm 1: add <mulr41=int64#4,<zp0=int64#2
- # asm 2: add <mulr41=%rcx,<zp0=%rsi
- add %rcx,%rsi
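- # mulr41 is the spill above bit 255 of the full product, so it is folded back
- # into limb 0 with a factor of 19, again using 2^255 = 19 mod 2^255 - 19.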
- # qhasm: mult = zp0
- # asm 1: mov <zp0=int64#2,>mult=int64#4
- # asm 2: mov <zp0=%rsi,>mult=%rcx
- mov %rsi,%rcx
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: mult += zp1
- # asm 1: add <zp1=int64#5,<mult=int64#4
- # asm 2: add <zp1=%r8,<mult=%rcx
- add %r8,%rcx
- # qhasm: zp1 = mult
- # asm 1: mov <mult=int64#4,>zp1=int64#5
- # asm 2: mov <mult=%rcx,>zp1=%r8
- mov %rcx,%r8
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zp0 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp0=int64#2
- # asm 2: and <mulredmask=%rdx,<zp0=%rsi
- and %rdx,%rsi
- # qhasm: mult += zp2
- # asm 1: add <zp2=int64#8,<mult=int64#4
- # asm 2: add <zp2=%r10,<mult=%rcx
- add %r10,%rcx
- # qhasm: zp2 = mult
- # asm 1: mov <mult=int64#4,>zp2=int64#6
- # asm 2: mov <mult=%rcx,>zp2=%r9
- mov %rcx,%r9
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zp1 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp1=int64#5
- # asm 2: and <mulredmask=%rdx,<zp1=%r8
- and %rdx,%r8
- # qhasm: mult += zp3
- # asm 1: add <zp3=int64#10,<mult=int64#4
- # asm 2: add <zp3=%r12,<mult=%rcx
- add %r12,%rcx
- # qhasm: zp3 = mult
- # asm 1: mov <mult=int64#4,>zp3=int64#7
- # asm 2: mov <mult=%rcx,>zp3=%rax
- mov %rcx,%rax
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zp2 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp2=int64#6
- # asm 2: and <mulredmask=%rdx,<zp2=%r9
- and %rdx,%r9
- # qhasm: mult += zp4
- # asm 1: add <zp4=int64#12,<mult=int64#4
- # asm 2: add <zp4=%r14,<mult=%rcx
- add %r14,%rcx
- # qhasm: zp4 = mult
- # asm 1: mov <mult=int64#4,>zp4=int64#8
- # asm 2: mov <mult=%rcx,>zp4=%r10
- mov %rcx,%r10
- # qhasm: (uint64) mult >>= 51
- # asm 1: shr $51,<mult=int64#4
- # asm 2: shr $51,<mult=%rcx
- shr $51,%rcx
- # qhasm: zp3 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp3=int64#7
- # asm 2: and <mulredmask=%rdx,<zp3=%rax
- and %rdx,%rax
- # qhasm: mult *= 19
- # asm 1: imulq $19,<mult=int64#4,>mult=int64#4
- # asm 2: imulq $19,<mult=%rcx,>mult=%rcx
- imulq $19,%rcx,%rcx
- # qhasm: zp0 += mult
- # asm 1: add <mult=int64#4,<zp0=int64#2
- # asm 2: add <mult=%rcx,<zp0=%rsi
- add %rcx,%rsi
- # qhasm: zp4 &= mulredmask
- # asm 1: and <mulredmask=int64#3,<zp4=int64#8
- # asm 2: and <mulredmask=%rdx,<zp4=%r10
- and %rdx,%r10
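- # The sequential carry chain is done: each limb's excess above bit 51 has been
- # pushed into the next limb, and the final carry out of limb 4 was multiplied
- # by 19 and added back into limb 0. The limbs are now bounded near 51 bits but
- # not necessarily canonical mod 2^255 - 19; that appears to be sufficient for
- # the subsequent field operations, with full reduction deferred.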
- # qhasm: *(uint64 *)(workp + 80) = zp0
- # asm 1: movq <zp0=int64#2,80(<workp=int64#1)
- # asm 2: movq <zp0=%rsi,80(<workp=%rdi)
- movq %rsi,80(%rdi)
- # qhasm: *(uint64 *)(workp + 88) = zp1
- # asm 1: movq <zp1=int64#5,88(<workp=int64#1)
- # asm 2: movq <zp1=%r8,88(<workp=%rdi)
- movq %r8,88(%rdi)
- # qhasm: *(uint64 *)(workp + 96) = zp2
- # asm 1: movq <zp2=int64#6,96(<workp=int64#1)
- # asm 2: movq <zp2=%r9,96(<workp=%rdi)
- movq %r9,96(%rdi)
- # qhasm: *(uint64 *)(workp + 104) = zp3
- # asm 1: movq <zp3=int64#7,104(<workp=int64#1)
- # asm 2: movq <zp3=%rax,104(<workp=%rdi)
- movq %rax,104(%rdi)
- # qhasm: *(uint64 *)(workp + 112) = zp4
- # asm 1: movq <zp4=int64#8,112(<workp=int64#1)
- # asm 2: movq <zp4=%r10,112(<workp=%rdi)
- movq %r10,112(%rdi)
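- # The reduced product has been written back as the five 51-bit limbs of the
- # zp field element at workp+80 .. workp+112.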
- # qhasm: caller1 = caller1_stack
- # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
- # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
- movq 0(%rsp),%r11
- # qhasm: caller2 = caller2_stack
- # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
- # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
- movq 8(%rsp),%r12
- # qhasm: caller3 = caller3_stack
- # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
- # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
- movq 16(%rsp),%r13
- # qhasm: caller4 = caller4_stack
- # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
- # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
- movq 24(%rsp),%r14
- # qhasm: caller5 = caller5_stack
- # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
- # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
- movq 32(%rsp),%r15
- # qhasm: caller6 = caller6_stack
- # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
- # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
- movq 40(%rsp),%rbx
- # qhasm: caller7 = caller7_stack
- # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
- # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
- movq 48(%rsp),%rbp
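- # Restore the registers saved in the prologue: the SysV callee-saved
- # %r12..%r15, %rbx and %rbp, plus %r11, which by qhasm convention carries the
- # stack-frame adjustment consumed by the leave sequence below.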
- # qhasm: leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
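- # For reference, the reduction performed above (128-bit column sums down to
- # five limbs of about 51 bits) is equivalent to the following illustrative C
- # sketch. The names r0..r4, s0..s4 and z0..z4 are hypothetical, not part of
- # this file; r0..r4 stand for the accumulated (mulrK1:zpK) pairs.
- #
- #   typedef unsigned __int128 u128;
- #   static const uint64_t MASK = ((uint64_t)1 << 51) - 1;     /* REDMASK51 */
- #
- #   u128 r0, r1, r2, r3, r4;             /* column sums, assumed computed */
- #   uint64_t z0 = ((uint64_t)r0 & MASK),      s0 = (uint64_t)(r0 >> 51);
- #   uint64_t z1 = ((uint64_t)r1 & MASK) + s0, s1 = (uint64_t)(r1 >> 51);
- #   uint64_t z2 = ((uint64_t)r2 & MASK) + s1, s2 = (uint64_t)(r2 >> 51);
- #   uint64_t z3 = ((uint64_t)r3 & MASK) + s2, s3 = (uint64_t)(r3 >> 51);
- #   uint64_t z4 = ((uint64_t)r4 & MASK) + s3, s4 = (uint64_t)(r4 >> 51);
- #   z0 += 19 * s4;                        /* 2^255 = 19 (mod 2^255 - 19) */
- #
- #   uint64_t c;                           /* sequential carry propagation */
- #   c = z0 >> 51; z0 &= MASK; z1 += c;
- #   c = z1 >> 51; z1 &= MASK; z2 += c;
- #   c = z2 >> 51; z2 &= MASK; z3 += c;
- #   c = z3 >> 51; z3 &= MASK; z4 += c;
- #   c = z4 >> 51; z4 &= MASK; z0 += 19 * c;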