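# scalarmult.s
# Curve25519 scalar multiplication for ARM NEON
# (crypto_scalarmult_curve25519_neon2), generated by qhasm. Lines
# beginning "# qhasm:" carry the original qhasm source; the "# asm 1:"
# and "# asm 2:" lines show the same statement before and after
# register allocation, followed by the emitted instruction.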

# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: stack3072 playground1
# qhasm: int32 playground1_ptr
# qhasm: int32 i
# qhasm: int32 j
# qhasm: int32 ptr
# qhasm: int32 swap
# qhasm: int32 pos
# qhasm: int32 bit
# qhasm: int32 byte
# qhasm: int32 word
# qhasm: int32 pos8
# qhasm: int32 pos7
# qhasm: int32 mulsource
# qhasm: int32 postcopy
# qhasm: int32 q
# qhasm: int32 p
# qhasm: int32 n
# qhasm: reg128 e0
# qhasm: reg128 e4
# qhasm: reg128 f0
# qhasm: reg128 f4
# qhasm: reg128 f8
# qhasm: reg128 g0
# qhasm: reg128 g4
# qhasm: reg128 g8
# qhasm: reg128 d0
# qhasm: reg128 d4
# qhasm: reg128 d8
# qhasm: reg128 x0
# qhasm: reg128 x4
# qhasm: reg128 x8
# qhasm: reg128 F0
# qhasm: reg128 F4
# qhasm: reg128 F8
# qhasm: reg128 G0
# qhasm: reg128 G4
# qhasm: reg128 G8
# qhasm: reg128 X0
# qhasm: reg128 X4
# qhasm: reg128 X8
# qhasm: reg128 f0plusF0
# qhasm: reg128 f0minusF0
# qhasm: reg128 g0plusG0
# qhasm: reg128 g0minusG0
# qhasm: reg128 f4plusF4
# qhasm: reg128 f4minusF4
# qhasm: reg128 g4plusG4
# qhasm: reg128 g4minusG4
# qhasm: reg128 f8plusF8
# qhasm: reg128 f8minusF8
# qhasm: reg128 g8plusG8
# qhasm: reg128 g8minusG8
# qhasm: reg128 fg01
# qhasm: reg128 fg23
# qhasm: reg128 fg45
# qhasm: reg128 fg67
# qhasm: reg128 fg89
# qhasm: reg128 fg01_2
# qhasm: reg128 fg23_2
# qhasm: reg128 fg45_2
# qhasm: reg128 fg67_2
# qhasm: reg128 fg45_19_38
# qhasm: reg128 fg67_19_38
# qhasm: reg128 fg89_19_38
# qhasm: reg128 h0
# qhasm: reg128 h1
# qhasm: reg128 h2
# qhasm: reg128 h3
# qhasm: reg128 h4
# qhasm: reg128 h5
# qhasm: reg128 h6
# qhasm: reg128 h7
# qhasm: reg128 h8
# qhasm: reg128 h9
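# Note: the f*/g*/h* variables hold elements of GF(2^255-19) in the
# usual 10-limb radix-2^25.5 representation (limbs alternate 26 and 25
# bits). Packing limbs of two field elements into one 128-bit q
# register lets a single NEON operation act on both ladder operands.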
# qhasm: stack64 h0stack
# qhasm: stack64 h1stack
# qhasm: stack64 h2stack
# qhasm: stack64 h3stack
# qhasm: stack64 h4stack
# qhasm: stack64 h5stack
# qhasm: stack64 h6stack
# qhasm: stack64 h7stack
# qhasm: stack64 h8stack
# qhasm: stack64 h9stack
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 t5
# qhasm: reg128 t6
# qhasm: reg128 t7
# qhasm: reg128 t8
# qhasm: reg128 t9
# qhasm: reg128 c0
# qhasm: reg128 c1
# qhasm: reg128 c2
# qhasm: reg128 c3
# qhasm: reg128 c4
# qhasm: reg128 c5
# qhasm: reg128 c6
# qhasm: reg128 c7
# qhasm: reg128 c8
# qhasm: reg128 c9
# qhasm: reg128 f02
# qhasm: reg128 f13
# qhasm: reg128 f46
# qhasm: reg128 f57
# qhasm: reg128 f89
# qhasm: reg128 g02
# qhasm: reg128 g13
# qhasm: reg128 g46
# qhasm: reg128 g57
# qhasm: reg128 g89
# qhasm: reg128 f13_2
# qhasm: reg128 f57_2
# qhasm: reg128 f89_2
# qhasm: reg128 mix
# qhasm: reg128 g13_19
# qhasm: reg128 g46_19
# qhasm: reg128 g57_19
# qhasm: reg128 g89_19
# qhasm: stack128 f13_2_stack
# qhasm: stack128 f57_2_stack
# qhasm: stack128 mix_stack
# qhasm: stack128 g13_19_stack
# qhasm: stack128 g46_19_stack
# qhasm: stack128 g57_19_stack
# qhasm: stack128 g89_19_stack
# qhasm: stack128 h9_stack
# qhasm: stack128 h7_stack
# qhasm: stack128 h5_stack
# qhasm: reg128 t
# qhasm: reg128 s
# qhasm: reg128 s2
# qhasm: reg128 c
# qhasm: reg128 mask26
# qhasm: reg128 mask25
# qhasm: reg128 _0x2000000
# qhasm: reg128 _0x1000000
# qhasm: reg128 _19_19_38_38
# qhasm: stack128 _0x2000000_stack
# qhasm: stack128 _0x1000000_stack
# qhasm: stack128 _19_19_38_38_stack
# qhasm: reg128 h02
# qhasm: reg128 h24
# qhasm: reg128 h46
# qhasm: reg128 h68
# qhasm: reg128 h80
# qhasm: reg128 h31
# qhasm: reg128 h53
# qhasm: reg128 h75
# qhasm: reg128 h97
# qhasm: reg128 h19
# qhasm: reg128 h04
# qhasm: reg128 h15
# qhasm: reg128 h26
# qhasm: reg128 h37
# qhasm: reg128 h48
# qhasm: reg128 h59
# qhasm: reg128 f0_f1_f2_f3
# qhasm: reg128 f4_f5_f6_f7
# qhasm: reg128 f8_f9_g8_g9
# qhasm: reg128 19f8_19f9_19g8_19g9
# qhasm: reg128 f8_2f9_g8_g9
# qhasm: reg128 g0_g1_g2_g3
# qhasm: reg128 g4_g5_g6_g7
# qhasm: reg128 f0_2f1_f2_2f3
# qhasm: reg128 f4_2f5_f6_2f7
# qhasm: reg128 f8_2f9_f9_f6
# qhasm: reg128 g0_19g1_g2_19g3
# qhasm: reg128 19g0_19g1_19g2_19g3
# qhasm: reg128 19g4_19g5_19g6_19g7
# qhasm: reg128 g4_19g5_g6_19g7
# qhasm: reg128 g8_19g9_19g8_19g9
# qhasm: reg128 f1_f8_f3_f0
# qhasm: reg128 f5_f2_f7_f4
# qhasm: reg128 19g8_g9_19g2_g3
# qhasm: reg128 19g4_g5_19g6_g7
# qhasm: reg128 _19_19_19_19
# qhasm: reg128 _0_1_0_1
# qhasm: reg128 _1_1_1_1
# qhasm: stack512 playground2
# qhasm: int32 playp
# qhasm: int32 binput
# qhasm: reg128 b
# qhasm: int32 pos0
# qhasm: int32 pos1
# qhasm: int32 pos2
# qhasm: int32 pos3
# qhasm: int32 posh
# qhasm: int32 posf
# qhasm: int32 posg
# qhasm: int32 posH
# qhasm: int32 posF
# qhasm: int32 posG
# qhasm: int32 posx
# qhasm: int32 posy
# qhasm: int32 out0
# qhasm: int32 out1
# qhasm: int32 out2
# qhasm: int32 out3
# qhasm: int32 out4
# qhasm: int32 out5
# qhasm: int32 out6
# qhasm: int32 out7
# qhasm: int32 out8
# qhasm: int32 out9
# qhasm: int32 carry0
# qhasm: int32 carry1
# qhasm: int32 carry2
# qhasm: int32 carry3
# qhasm: int32 carry4
# qhasm: int32 carry5
# qhasm: int32 carry6
# qhasm: int32 carry7
# qhasm: int32 carry8
# qhasm: int32 carry9
# qhasm: int32 carry
# qhasm: reg128 zero
# qhasm: reg128 one
# qhasm: qpushenter crypto_scalarmult_curve25519_neon2
.align 4
.global _crypto_scalarmult_curve25519_neon2
.global crypto_scalarmult_curve25519_neon2
.type _crypto_scalarmult_curve25519_neon2 STT_FUNC
.type crypto_scalarmult_curve25519_neon2 STT_FUNC
_crypto_scalarmult_curve25519_neon2:
crypto_scalarmult_curve25519_neon2:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#736
and sp,sp,#0xffffffe0
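# Prologue: save the callee-saved NEON registers q4-q7 (AAPCS), keep
# the caller's sp in r12, then reserve 736 bytes of scratch space and
# round sp down to a 32-byte boundary so the aligned vld1.8/vst1.8
# accesses below are valid.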
# qhasm: stack64 stack_r45
# qhasm: stack64 stack_r67
# qhasm: stack64 stack_r89
# qhasm: stack64 stack_r1011
# qhasm: stack32 stack_r12
# qhasm: stack32 stack_r14
# qhasm: assign r4 r5 to caller_r4 caller_r5;stack_r45 = caller_r4 caller_r5
# asm 1: strd <caller_r4=int32#5,>stack_r45=stack64#1
# asm 2: strd <caller_r4=r4,>stack_r45=[sp,#0]
strd r4,[sp,#0]
# qhasm: assign r6 r7 to caller_r6 caller_r7;stack_r67 = caller_r6 caller_r7
# asm 1: strd <caller_r6=int32#7,>stack_r67=stack64#2
# asm 2: strd <caller_r6=r6,>stack_r67=[sp,#8]
strd r6,[sp,#8]
# qhasm: assign r8 r9 to caller_r8 caller_r9;stack_r89 = caller_r8 caller_r9
# asm 1: strd <caller_r8=int32#9,>stack_r89=stack64#3
# asm 2: strd <caller_r8=r8,>stack_r89=[sp,#16]
strd r8,[sp,#16]
# qhasm: assign r10 r11 to caller_r10 caller_r11;stack_r1011 = caller_r10 caller_r11
# asm 1: strd <caller_r10=int32#11,>stack_r1011=stack64#4
# asm 2: strd <caller_r10=r10,>stack_r1011=[sp,#24]
strd r10,[sp,#24]
# qhasm: stack_r12 = caller_r12
# asm 1: str <caller_r12=int32#13,>stack_r12=stack32#1
# asm 2: str <caller_r12=r12,>stack_r12=[sp,#480]
str r12,[sp,#480]
# qhasm: stack_r14 = caller_r14
# asm 1: str <caller_r14=int32#14,>stack_r14=stack32#2
# asm 2: str <caller_r14=r14,>stack_r14=[sp,#484]
str r14,[sp,#484]
# qhasm: stack32 swap_stack
# qhasm: stack32 pos_stack
# qhasm: stack256 e
# qhasm: int32 eptr
# qhasm: q = input_0
# asm 1: mov >q=int32#1,<input_0=int32#1
# asm 2: mov >q=r0,<input_0=r0
mov r0,r0
# qhasm: n = input_1
# asm 1: mov >n=int32#2,<input_1=int32#2
# asm 2: mov >n=r1,<input_1=r1
mov r1,r1
# qhasm: p = input_2
# asm 1: mov >p=int32#3,<input_2=int32#3
# asm 2: mov >p=r2,<input_2=r2
mov r2,r2
# qhasm: playground1_ptr = &playground1
# asm 1: lea >playground1_ptr=int32#4,<playground1=stack3072#1
# asm 2: lea >playground1_ptr=r3,<playground1=[sp,#32]
add r3,sp,#32
# qhasm: swap = 0
# asm 1: ldr >swap=int32#5,=0
# asm 2: ldr >swap=r4,=0
ldr r4,=0
# qhasm: pos = 254
# asm 1: ldr >pos=int32#6,=254
# asm 2: ldr >pos=r5,=254
ldr r5,=254
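# Montgomery-ladder state: swap is the running conditional-swap flag
# and pos indexes the current scalar bit, counting down from 254, so
# the main loop consumes bits 254..0 of the clamped scalar.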
# qhasm: 4x _0x1000000 = 1
# asm 1: vmov.i32 >_0x1000000=reg128#1,#1
# asm 2: vmov.i32 >_0x1000000=q0,#1
vmov.i32 q0,#1
# qhasm: 2x _0x2000000 = _0x1000000 unsigned>> 7
# asm 1: vshr.u64 >_0x2000000=reg128#2,<_0x1000000=reg128#1,#7
# asm 2: vshr.u64 >_0x2000000=q1,<_0x1000000=q0,#7
vshr.u64 q1,q0,#7
# qhasm: 2x _0x1000000 = _0x1000000 unsigned>> 8
# asm 1: vshr.u64 >_0x1000000=reg128#1,<_0x1000000=reg128#1,#8
# asm 2: vshr.u64 >_0x1000000=q0,<_0x1000000=q0,#8
vshr.u64 q0,q0,#8
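# q0 started as 1 in every 32-bit lane, so the 64-bit shifts leave
# 2^25 in each half of _0x2000000 (q1) and 2^24 in each half of
# _0x1000000 (q0): the rounding constants added before the signed
# 26-bit and 25-bit carry shifts during reduction.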
# qhasm: new _19_19_38_38
# qhasm: _19_19_38_38 = 19,19,_19_19_38_38[2,3]
# asm 1: vmov.i32 <_19_19_38_38=reg128#3%bot,#19
# asm 2: vmov.i32 <_19_19_38_38=d4,#19
vmov.i32 d4,#19
# qhasm: _19_19_38_38 = _19_19_38_38[0,1],38,38
# asm 1: vmov.i32 <_19_19_38_38=reg128#3%top,#38
# asm 2: vmov.i32 <_19_19_38_38=d5,#38
vmov.i32 d5,#38
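# (19,19,38,38): multipliers for reduction mod 2^255-19. Product terms
# that overflow past 2^255 are folded back in via a factor of 19; the
# doubled variant 38 = 2*19 absorbs the extra factor of 2 picked up by
# cross terms between odd (25-bit) limbs.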
  338. # qhasm: ptr = &_0x2000000_stack
  339. # asm 1: lea >ptr=int32#7,<_0x2000000_stack=stack128#1
  340. # asm 2: lea >ptr=r6,<_0x2000000_stack=[sp,#512]
  341. add r6,sp,#512
  342. # qhasm: mem128[ptr] aligned= _0x2000000
  343. # asm 1: vst1.8 {<_0x2000000=reg128#2%bot-<_0x2000000=reg128#2%top},[<ptr=int32#7,: 128]
  344. # asm 2: vst1.8 {<_0x2000000=d2-<_0x2000000=d3},[<ptr=r6,: 128]
  345. vst1.8 {d2-d3},[r6,: 128]
  346. # qhasm: ptr = &_0x1000000_stack
  347. # asm 1: lea >ptr=int32#7,<_0x1000000_stack=stack128#2
  348. # asm 2: lea >ptr=r6,<_0x1000000_stack=[sp,#528]
  349. add r6,sp,#528
  350. # qhasm: mem128[ptr] aligned= _0x1000000
  351. # asm 1: vst1.8 {<_0x1000000=reg128#1%bot-<_0x1000000=reg128#1%top},[<ptr=int32#7,: 128]
  352. # asm 2: vst1.8 {<_0x1000000=d0-<_0x1000000=d1},[<ptr=r6,: 128]
  353. vst1.8 {d0-d1},[r6,: 128]
  354. # qhasm: ptr = &_19_19_38_38_stack
  355. # asm 1: lea >ptr=int32#7,<_19_19_38_38_stack=stack128#3
  356. # asm 2: lea >ptr=r6,<_19_19_38_38_stack=[sp,#544]
  357. add r6,sp,#544
  358. # qhasm: mem128[ptr] aligned= _19_19_38_38
  359. # asm 1: vst1.8 {<_19_19_38_38=reg128#3%bot-<_19_19_38_38=reg128#3%top},[<ptr=int32#7,: 128]
  360. # asm 2: vst1.8 {<_19_19_38_38=d4-<_19_19_38_38=d5},[<ptr=r6,: 128]
  361. vst1.8 {d4-d5},[r6,: 128]
  362. # qhasm: posx = playground1_ptr + 0
  363. # asm 1: add >posx=int32#7,<playground1_ptr=int32#4,#0
  364. # asm 2: add >posx=r6,<playground1_ptr=r3,#0
  365. add r6,r3,#0
  366. # qhasm: 4x zero = 0
  367. # asm 1: vmov.i32 >zero=reg128#3,#0
  368. # asm 2: vmov.i32 >zero=q2,#0
  369. vmov.i32 q2,#0
  370. # qhasm: mem128[posx] aligned= zero;posx += 16
  371. # asm 1: vst1.8 {<zero=reg128#3%bot-<zero=reg128#3%top},[<posx=int32#7,: 128]!
  372. # asm 2: vst1.8 {<zero=d4-<zero=d5},[<posx=r6,: 128]!
  373. vst1.8 {d4-d5},[r6,: 128]!
  374. # qhasm: mem128[posx] aligned= zero;posx += 16
  375. # asm 1: vst1.8 {<zero=reg128#3%bot-<zero=reg128#3%top},[<posx=int32#7,: 128]!
  376. # asm 2: vst1.8 {<zero=d4-<zero=d5},[<posx=r6,: 128]!
  377. vst1.8 {d4-d5},[r6,: 128]!
  378. # qhasm: mem64[posx] aligned= zero[0]
  379. # asm 1: vst1.8 <zero=reg128#3%bot,[<posx=int32#7,: 64]
  380. # asm 2: vst1.8 <zero=d4,[<posx=r6,: 64]
  381. vst1.8 d4,[r6,: 64]
  382. # qhasm: ptr = playground1_ptr + 0
  383. # asm 1: add >ptr=int32#7,<playground1_ptr=int32#4,#0
  384. # asm 2: add >ptr=r6,<playground1_ptr=r3,#0
  385. add r6,r3,#0
  386. # qhasm: word = 960
  387. # asm 1: ldr >word=int32#8,=960
  388. # asm 2: ldr >word=r7,=960
  389. ldr r7,=960
  390. # qhasm: word = word - 2
  391. # asm 1: sub >word=int32#8,<word=int32#8,#2
  392. # asm 2: sub >word=r7,<word=r7,#2
  393. sub r7,r7,#2
  394. # qhasm: word = -word
  395. # asm 1: neg >word=int32#8,<word=int32#8
  396. # asm 2: neg >word=r7,<word=r7
  397. neg r7,r7
  398. # qhasm: word = word - (word << 7)
  399. # asm 1: sub >word=int32#8,<word=int32#8,<word=int32#8,LSL #7
  400. # asm 2: sub >word=r7,<word=r7,<word=r7,LSL #7
  401. sub r7,r7,r7,LSL #7
  402. # qhasm: mem32[ptr] = word
  403. # asm 1: str <word=int32#8,[<ptr=int32#7]
  404. # asm 2: str <word=r7,[<ptr=r6]
  405. str r7,[r6]
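# note: word = 127*(960-2) = 121666 = (486662+2)/4, the curve constant used in the ladder's doubling step, stored as limb 0 of the zeroed element at playground1+0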
# qhasm: eptr = &e
# asm 1: lea >eptr=int32#7,<e=stack256#1
# asm 2: lea >eptr=r6,<e=[sp,#704]
add r6,sp,#704
# qhasm: e0 = mem128[n];n += 16
# asm 1: vld1.8 {>e0=reg128#3%bot->e0=reg128#3%top},[<n=int32#2]!
# asm 2: vld1.8 {>e0=d4->e0=d5},[<n=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: e4 = mem128[n]
# asm 1: vld1.8 {>e4=reg128#4%bot->e4=reg128#4%top},[<n=int32#2]
# asm 2: vld1.8 {>e4=d6->e4=d7},[<n=r1]
vld1.8 {d6-d7},[r1]
# qhasm: mem128[eptr] aligned= e0;eptr += 16
# asm 1: vst1.8 {<e0=reg128#3%bot-<e0=reg128#3%top},[<eptr=int32#7,: 128]!
# asm 2: vst1.8 {<e0=d4-<e0=d5},[<eptr=r6,: 128]!
vst1.8 {d4-d5},[r6,: 128]!
# qhasm: mem128[eptr] aligned= e4
# asm 1: vst1.8 {<e4=reg128#4%bot-<e4=reg128#4%top},[<eptr=int32#7,: 128]
# asm 2: vst1.8 {<e4=d6-<e4=d7},[<eptr=r6,: 128]
vst1.8 {d6-d7},[r6,: 128]
# qhasm: eptr -= 16
# asm 1: sub >eptr=int32#2,<eptr=int32#7,#16
# asm 2: sub >eptr=r1,<eptr=r6,#16
sub r1,r6,#16
# qhasm: byte = mem8[eptr]
# asm 1: ldrb >byte=int32#7,[<eptr=int32#2]
# asm 2: ldrb >byte=r6,[<eptr=r1]
ldrb r6,[r1]
# qhasm: byte &= 248
# asm 1: and >byte=int32#7,<byte=int32#7,#248
# asm 2: and >byte=r6,<byte=r6,#248
and r6,r6,#248
# qhasm: mem8[eptr] = byte
# asm 1: strb <byte=int32#7,[<eptr=int32#2]
# asm 2: strb <byte=r6,[<eptr=r1]
strb r6,[r1]
# qhasm: byte = mem8[eptr + 31]
# asm 1: ldrb >byte=int32#7,[<eptr=int32#2,#31]
# asm 2: ldrb >byte=r6,[<eptr=r1,#31]
ldrb r6,[r1,#31]
# qhasm: byte &= 127
# asm 1: and >byte=int32#7,<byte=int32#7,#127
# asm 2: and >byte=r6,<byte=r6,#127
and r6,r6,#127
# qhasm: byte |= 64
# asm 1: orr >byte=int32#7,<byte=int32#7,#64
# asm 2: orr >byte=r6,<byte=r6,#64
orr r6,r6,#64
# qhasm: mem8[eptr + 31] = byte
# asm 1: strb <byte=int32#7,[<eptr=int32#2,#31]
# asm 2: strb <byte=r6,[<eptr=r1,#31]
strb r6,[r1,#31]
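# note: standard X25519 clamping of the scalar copy e: clear the low 3 bits of byte 0, clear bit 7 and set bit 6 of byte 31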
# qhasm: 2x mask26 = 0xffffffff
# asm 1: vmov.i64 >mask26=reg128#3,#0xffffffff
# asm 2: vmov.i64 >mask26=q2,#0xffffffff
vmov.i64 q2,#0xffffffff
# qhasm: 2x mask25 = mask26 unsigned>> 7
# asm 1: vshr.u64 >mask25=reg128#4,<mask26=reg128#3,#7
# asm 2: vshr.u64 >mask25=q3,<mask26=q2,#7
vshr.u64 q3,q2,#7
# qhasm: 2x mask26 = mask26 unsigned>> 6
# asm 1: vshr.u64 >mask26=reg128#3,<mask26=reg128#3,#6
# asm 2: vshr.u64 >mask26=q2,<mask26=q2,#6
vshr.u64 q2,q2,#6
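# note: mask26 = 2^26-1 and mask25 = 2^25-1, the limb masks for the radix-2^25.5 representation (limbs alternate 26 and 25 bits)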
# qhasm: new h0
# qhasm: new h1
# qhasm: new h2
# qhasm: new h3
# qhasm: new h4
# qhasm: new h5
# qhasm: new h6
# qhasm: new h7
# qhasm: new h8
# qhasm: new h9
# qhasm: h0 = mem64[p] h0[1]
# asm 1: vld1.8 {<h0=reg128#5%bot},[<p=int32#3]
# asm 2: vld1.8 {<h0=d8},[<p=r2]
vld1.8 {d8},[r2]
# qhasm: h1 = mem64[p] h1[1]
# asm 1: vld1.8 {<h1=reg128#6%bot},[<p=int32#3]
# asm 2: vld1.8 {<h1=d10},[<p=r2]
vld1.8 {d10},[r2]
# qhasm: p += 6
# asm 1: add >p=int32#3,<p=int32#3,#6
# asm 2: add >p=r2,<p=r2,#6
add r2,r2,#6
# qhasm: h2 = mem64[p] h2[1]
# asm 1: vld1.8 {<h2=reg128#7%bot},[<p=int32#3]
# asm 2: vld1.8 {<h2=d12},[<p=r2]
vld1.8 {d12},[r2]
# qhasm: h3 = mem64[p] h3[1]
# asm 1: vld1.8 {<h3=reg128#8%bot},[<p=int32#3]
# asm 2: vld1.8 {<h3=d14},[<p=r2]
vld1.8 {d14},[r2]
# qhasm: p += 6
# asm 1: add >p=int32#3,<p=int32#3,#6
# asm 2: add >p=r2,<p=r2,#6
add r2,r2,#6
# qhasm: h4 = mem64[p] h4[1]
# asm 1: vld1.8 {<h4=reg128#9%bot},[<p=int32#3]
# asm 2: vld1.8 {<h4=d16},[<p=r2]
vld1.8 {d16},[r2]
# qhasm: p += 4
# asm 1: add >p=int32#3,<p=int32#3,#4
# asm 2: add >p=r2,<p=r2,#4
add r2,r2,#4
# qhasm: h5 = mem64[p] h5[1]
# asm 1: vld1.8 {<h5=reg128#10%bot},[<p=int32#3]
# asm 2: vld1.8 {<h5=d18},[<p=r2]
vld1.8 {d18},[r2]
# qhasm: h6 = mem64[p] h6[1]
# asm 1: vld1.8 {<h6=reg128#11%bot},[<p=int32#3]
# asm 2: vld1.8 {<h6=d20},[<p=r2]
vld1.8 {d20},[r2]
# qhasm: p += 6
# asm 1: add >p=int32#3,<p=int32#3,#6
# asm 2: add >p=r2,<p=r2,#6
add r2,r2,#6
# qhasm: h7 = mem64[p] h7[1]
# asm 1: vld1.8 {<h7=reg128#12%bot},[<p=int32#3]
# asm 2: vld1.8 {<h7=d22},[<p=r2]
vld1.8 {d22},[r2]
# qhasm: p += 2
# asm 1: add >p=int32#3,<p=int32#3,#2
# asm 2: add >p=r2,<p=r2,#2
add r2,r2,#2
# qhasm: h8 = mem64[p] h8[1]
# asm 1: vld1.8 {<h8=reg128#13%bot},[<p=int32#3]
# asm 2: vld1.8 {<h8=d24},[<p=r2]
vld1.8 {d24},[r2]
# qhasm: h9 = mem64[p] h9[1]
# asm 1: vld1.8 {<h9=reg128#14%bot},[<p=int32#3]
# asm 2: vld1.8 {<h9=d26},[<p=r2]
vld1.8 {d26},[r2]
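# note: the staggered loads above plus the shifts and masks below unpack the 32-byte point p into ten limbs h0..h9 of alternating 26/25 bits (radix 2^25.5)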
# qhasm: 2x h1 unsigned>>= 26
# asm 1: vshr.u64 >h1=reg128#6,<h1=reg128#6,#26
# asm 2: vshr.u64 >h1=q5,<h1=q5,#26
vshr.u64 q5,q5,#26
# qhasm: 2x h2 unsigned>>= 3
# asm 1: vshr.u64 >h2=reg128#7,<h2=reg128#7,#3
# asm 2: vshr.u64 >h2=q6,<h2=q6,#3
vshr.u64 q6,q6,#3
# qhasm: 2x h3 unsigned>>= 29
# asm 1: vshr.u64 >h3=reg128#8,<h3=reg128#8,#29
# asm 2: vshr.u64 >h3=q7,<h3=q7,#29
vshr.u64 q7,q7,#29
# qhasm: 2x h4 unsigned>>= 6
# asm 1: vshr.u64 >h4=reg128#9,<h4=reg128#9,#6
# asm 2: vshr.u64 >h4=q8,<h4=q8,#6
vshr.u64 q8,q8,#6
# qhasm: 2x h6 unsigned>>= 25
# asm 1: vshr.u64 >h6=reg128#11,<h6=reg128#11,#25
# asm 2: vshr.u64 >h6=q10,<h6=q10,#25
vshr.u64 q10,q10,#25
# qhasm: 2x h7 unsigned>>= 3
# asm 1: vshr.u64 >h7=reg128#12,<h7=reg128#12,#3
# asm 2: vshr.u64 >h7=q11,<h7=q11,#3
vshr.u64 q11,q11,#3
# qhasm: 2x h8 unsigned>>= 12
# asm 1: vshr.u64 >h8=reg128#13,<h8=reg128#13,#12
# asm 2: vshr.u64 >h8=q12,<h8=q12,#12
vshr.u64 q12,q12,#12
# qhasm: 2x h9 unsigned>>= 38
# asm 1: vshr.u64 >h9=reg128#14,<h9=reg128#14,#38
# asm 2: vshr.u64 >h9=q13,<h9=q13,#38
vshr.u64 q13,q13,#38
# qhasm: h0 &= mask26
# asm 1: vand >h0=reg128#5,<h0=reg128#5,<mask26=reg128#3
# asm 2: vand >h0=q4,<h0=q4,<mask26=q2
vand q4,q4,q2
# qhasm: h2 &= mask26
# asm 1: vand >h2=reg128#7,<h2=reg128#7,<mask26=reg128#3
# asm 2: vand >h2=q6,<h2=q6,<mask26=q2
vand q6,q6,q2
# qhasm: h4 &= mask26
# asm 1: vand >h4=reg128#9,<h4=reg128#9,<mask26=reg128#3
# asm 2: vand >h4=q8,<h4=q8,<mask26=q2
vand q8,q8,q2
# qhasm: h6 &= mask26
# asm 1: vand >h6=reg128#11,<h6=reg128#11,<mask26=reg128#3
# asm 2: vand >h6=q10,<h6=q10,<mask26=q2
vand q10,q10,q2
# qhasm: h8 &= mask26
# asm 1: vand >h8=reg128#3,<h8=reg128#13,<mask26=reg128#3
# asm 2: vand >h8=q2,<h8=q12,<mask26=q2
vand q2,q12,q2
# qhasm: h1 &= mask25
# asm 1: vand >h1=reg128#6,<h1=reg128#6,<mask25=reg128#4
# asm 2: vand >h1=q5,<h1=q5,<mask25=q3
vand q5,q5,q3
# qhasm: h3 &= mask25
# asm 1: vand >h3=reg128#8,<h3=reg128#8,<mask25=reg128#4
# asm 2: vand >h3=q7,<h3=q7,<mask25=q3
vand q7,q7,q3
# qhasm: h5 &= mask25
# asm 1: vand >h5=reg128#10,<h5=reg128#10,<mask25=reg128#4
# asm 2: vand >h5=q9,<h5=q9,<mask25=q3
vand q9,q9,q3
# qhasm: h7 &= mask25
# asm 1: vand >h7=reg128#12,<h7=reg128#12,<mask25=reg128#4
# asm 2: vand >h7=q11,<h7=q11,<mask25=q3
vand q11,q11,q3
# qhasm: h9 &= mask25
# asm 1: vand >h9=reg128#4,<h9=reg128#14,<mask25=reg128#4
# asm 2: vand >h9=q3,<h9=q13,<mask25=q3
vand q3,q13,q3
# qhasm: posh = playground1_ptr + 48
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#48
# asm 2: add >posh=r2,<playground1_ptr=r3,#48
add r2,r3,#48
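# note: carry chain follows: add the rounding constant, shift signed by 26 or 25 into the next limb, and fold the top carry c9 back into h0 as 19*c9 (computed as 2*c9 + 16*c9 + c9)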
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#13,<h0=reg128#5,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t0=q12,<h0=q4,<_0x2000000=q1
vadd.i64 q12,q4,q1
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#14,<h6=reg128#11,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t6=q13,<h6=q10,<_0x2000000=q1
vadd.i64 q13,q10,q1
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#13,<t0=reg128#13,#26
# asm 2: vshr.s64 >c0=q12,<t0=q12,#26
vshr.s64 q12,q12,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#14,<t6=reg128#14,#26
# asm 2: vshr.s64 >c6=q13,<t6=q13,#26
vshr.s64 q13,q13,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#6,<h1=reg128#6,<c0=reg128#13
# asm 2: vadd.i64 >h1=q5,<h1=q5,<c0=q12
vadd.i64 q5,q5,q12
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#13,<c0=reg128#13,#26
# asm 2: vshl.i64 >t0=q12,<c0=q12,#26
vshl.i64 q12,q12,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#15,<h1=reg128#6,<_0x1000000=reg128#1
# asm 2: vadd.i64 >t1=q14,<h1=q5,<_0x1000000=q0
vadd.i64 q14,q5,q0
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#12,<h7=reg128#12,<c6=reg128#14
# asm 2: vadd.i64 >h7=q11,<h7=q11,<c6=q13
vadd.i64 q11,q11,q13
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#14,<c6=reg128#14,#26
# asm 2: vshl.i64 >t6=q13,<c6=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#16,<h7=reg128#12,<_0x1000000=reg128#1
# asm 2: vadd.i64 >t7=q15,<h7=q11,<_0x1000000=q0
vadd.i64 q15,q11,q0
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#5,<h0=reg128#5,<t0=reg128#13
# asm 2: vsub.i64 >h0=q4,<h0=q4,<t0=q12
vsub.i64 q4,q4,q12
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#13,<t1=reg128#15,#25
# asm 2: vshr.s64 >c1=q12,<t1=q14,#25
vshr.s64 q12,q14,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#11,<h6=reg128#11,<t6=reg128#14
# asm 2: vsub.i64 >h6=q10,<h6=q10,<t6=q13
vsub.i64 q10,q10,q13
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#14,<t7=reg128#16,#25
# asm 2: vshr.s64 >c7=q13,<t7=q15,#25
vshr.s64 q13,q15,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#7,<h2=reg128#7,<c1=reg128#13
# asm 2: vadd.i64 >h2=q6,<h2=q6,<c1=q12
vadd.i64 q6,q6,q12
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#13,<c1=reg128#13,#25
# asm 2: vshl.i64 >t1=q12,<c1=q12,#25
vshl.i64 q12,q12,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#15,<h2=reg128#7,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t2=q14,<h2=q6,<_0x2000000=q1
vadd.i64 q14,q6,q1
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#3,<h8=reg128#3,<c7=reg128#14
# asm 2: vadd.i64 >h8=q2,<h8=q2,<c7=q13
vadd.i64 q2,q2,q13
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#6,<h1=reg128#6,<t1=reg128#13
# asm 2: vsub.i64 >h1=q5,<h1=q5,<t1=q12
vsub.i64 q5,q5,q12
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#13,<t2=reg128#15,#26
# asm 2: vshr.s64 >c2=q12,<t2=q14,#26
vshr.s64 q12,q14,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#14,<c7=reg128#14,#25
# asm 2: vshl.i64 >t7=q13,<c7=q13,#25
vshl.i64 q13,q13,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#15,<h8=reg128#3,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t8=q14,<h8=q2,<_0x2000000=q1
vadd.i64 q14,q2,q1
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#8,<h3=reg128#8,<c2=reg128#13
# asm 2: vadd.i64 >h3=q7,<h3=q7,<c2=q12
vadd.i64 q7,q7,q12
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#13,<c2=reg128#13,#26
# asm 2: vshl.i64 >t2=q12,<c2=q12,#26
vshl.i64 q12,q12,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#8,<_0x1000000=reg128#1
# asm 2: vadd.i64 >t3=q15,<h3=q7,<_0x1000000=q0
vadd.i64 q15,q7,q0
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#12,<h7=reg128#12,<t7=reg128#14
# asm 2: vsub.i64 >h7=q11,<h7=q11,<t7=q13
vsub.i64 q11,q11,q13
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#14,<t8=reg128#15,#26
# asm 2: vshr.s64 >c8=q13,<t8=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#7,<h2=reg128#7,<t2=reg128#13
# asm 2: vsub.i64 >h2=q6,<h2=q6,<t2=q12
vsub.i64 q6,q6,q12
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#13,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q12,<t3=q15,#25
vshr.s64 q12,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#4,<h9=reg128#4,<c8=reg128#14
# asm 2: vadd.i64 >h9=q3,<h9=q3,<c8=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#14,<c8=reg128#14,#26
# asm 2: vshl.i64 >t8=q13,<c8=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#15,<h9=reg128#4,<_0x1000000=reg128#1
# asm 2: vadd.i64 >t9=q14,<h9=q3,<_0x1000000=q0
vadd.i64 q14,q3,q0
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#9,<h4=reg128#9,<c3=reg128#13
# asm 2: vadd.i64 >h4=q8,<h4=q8,<c3=q12
vadd.i64 q8,q8,q12
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#13,<c3=reg128#13,#25
# asm 2: vshl.i64 >t3=q12,<c3=q12,#25
vshl.i64 q12,q12,#25
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#9,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t4=q15,<h4=q8,<_0x2000000=q1
vadd.i64 q15,q8,q1
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#3,<h8=reg128#3,<t8=reg128#14
# asm 2: vsub.i64 >h8=q2,<h8=q2,<t8=q13
vsub.i64 q2,q2,q13
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#14,<t9=reg128#15,#25
# asm 2: vshr.s64 >c9=q13,<t9=q14,#25
vshr.s64 q13,q14,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#8,<h3=reg128#8,<t3=reg128#13
# asm 2: vsub.i64 >h3=q7,<h3=q7,<t3=q12
vsub.i64 q7,q7,q12
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#13,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q12,<t4=q15,#26
vshr.s64 q12,q15,#26
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#15,<c9=reg128#14,<c9=reg128#14
# asm 2: vadd.i64 >s=q14,<c9=q13,<c9=q13
vadd.i64 q14,q13,q13
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#10,<h5=reg128#10,<c4=reg128#13
# asm 2: vadd.i64 >h5=q9,<h5=q9,<c4=q12
vadd.i64 q9,q9,q12
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%bot,<h3=reg128#8%bot
# asm 2: vtrn.32 <h2=d12,<h3=d14
vtrn.32 d12,d14
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#13,<c4=reg128#13,#26
# asm 2: vshl.i64 >t4=q12,<c4=q12,#26
vshl.i64 q12,q12,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%top,<h3=reg128#8%top
# asm 2: vtrn.32 <h2=d13,<h3=d15
vtrn.32 d13,d15
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#1,<h5=reg128#10,<_0x1000000=reg128#1
# asm 2: vadd.i64 >t5=q0,<h5=q9,<_0x1000000=q0
vadd.i64 q0,q9,q0
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<s=reg128#15
# asm 2: vadd.i64 >h0=q4,<h0=q4,<s=q14
vadd.i64 q4,q4,q14
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#7%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d12,[<posh=r2,: 64]!
vst1.8 d12,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#7,<c9=reg128#14,#4
# asm 2: vshl.i64 >s=q6,<c9=q13,#4
vshl.i64 q6,q13,#4
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#8,<h4=reg128#9,<t4=reg128#13
# asm 2: vsub.i64 >h4=q7,<h4=q8,<t4=q12
vsub.i64 q7,q8,q12
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#1,<t5=reg128#1,#25
# asm 2: vshr.s64 >c5=q0,<t5=q0,#25
vshr.s64 q0,q0,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<s=reg128#7
# asm 2: vadd.i64 >h0=q4,<h0=q4,<s=q6
vadd.i64 q4,q4,q6
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#7,<h6=reg128#11,<c5=reg128#1
# asm 2: vadd.i64 >h6=q6,<h6=q10,<c5=q0
vadd.i64 q6,q10,q0
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#1,<c5=reg128#1,#25
# asm 2: vshl.i64 >t5=q0,<c5=q0,#25
vshl.i64 q0,q0,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#9,<h6=reg128#7,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t6=q8,<h6=q6,<_0x2000000=q1
vadd.i64 q8,q6,q1
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<c9=reg128#14
# asm 2: vadd.i64 >h0=q4,<h0=q4,<c9=q13
vadd.i64 q4,q4,q13
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#11,<c9=reg128#14,#25
# asm 2: vshl.i64 >t9=q10,<c9=q13,#25
vshl.i64 q10,q13,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#2,<h0=reg128#5,<_0x2000000=reg128#2
# asm 2: vadd.i64 >t0=q1,<h0=q4,<_0x2000000=q1
vadd.i64 q1,q4,q1
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#1,<h5=reg128#10,<t5=reg128#1
# asm 2: vsub.i64 >h5=q0,<h5=q9,<t5=q0
vsub.i64 q0,q9,q0
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#9,<t6=reg128#9,#26
# asm 2: vshr.s64 >c6=q8,<t6=q8,#26
vshr.s64 q8,q8,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#4,<h9=reg128#4,<t9=reg128#11
# asm 2: vsub.i64 >h9=q3,<h9=q3,<t9=q10
vsub.i64 q3,q3,q10
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#8%bot,<h5=reg128#1%bot
# asm 2: vtrn.32 <h4=d14,<h5=d0
vtrn.32 d14,d0
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#2,<t0=reg128#2,#26
# asm 2: vshr.s64 >c0=q1,<t0=q1,#26
vshr.s64 q1,q1,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#8%top,<h5=reg128#1%top
# asm 2: vtrn.32 <h4=d15,<h5=d1
vtrn.32 d15,d1
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#1,<h7=reg128#12,<c6=reg128#9
# asm 2: vadd.i64 >h7=q0,<h7=q11,<c6=q8
vadd.i64 q0,q11,q8
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#8%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d14,[<posh=r2,: 64]
vst1.8 d14,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#8,<c6=reg128#9,#26
# asm 2: vshl.i64 >t6=q7,<c6=q8,#26
vshl.i64 q7,q8,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#6,<h1=reg128#6,<c0=reg128#2
# asm 2: vadd.i64 >h1=q5,<h1=q5,<c0=q1
vadd.i64 q5,q5,q1
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%bot,<h9=reg128#4%bot
# asm 2: vtrn.32 <h8=d4,<h9=d6
vtrn.32 d4,d6
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#2,<c0=reg128#2,#26
# asm 2: vshl.i64 >t0=q1,<c0=q1,#26
vshl.i64 q1,q1,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%top,<h9=reg128#4%top
# asm 2: vtrn.32 <h8=d5,<h9=d7
vtrn.32 d5,d7
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#4,<h6=reg128#7,<t6=reg128#8
# asm 2: vsub.i64 >h6=q3,<h6=q6,<t6=q7
vsub.i64 q3,q6,q7
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#2,<h0=reg128#5,<t0=reg128#2
# asm 2: vsub.i64 >h0=q1,<h0=q4,<t0=q1
vsub.i64 q1,q4,q1
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#4%bot,<h7=reg128#1%bot
# asm 2: vtrn.32 <h6=d6,<h7=d0
vtrn.32 d6,d0
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#4%top,<h7=reg128#1%top
# asm 2: vtrn.32 <h6=d7,<h7=d1
vtrn.32 d7,d1
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#2%bot,<h1=reg128#6%bot
# asm 2: vtrn.32 <h0=d2,<h1=d10
vtrn.32 d2,d10
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#2%top,<h1=reg128#6%top
# asm 2: vtrn.32 <h0=d3,<h1=d11
vtrn.32 d3,d11
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#4%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d6,[<posh=r2,: 64]
vst1.8 d6,[r2,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#2%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d2,[<posh=r2,: 64]
vst1.8 d2,[r2,: 64]
# qhasm: posx = playground1_ptr + 96
# asm 1: add >posx=int32#3,<playground1_ptr=int32#4,#96
# asm 2: add >posx=r2,<playground1_ptr=r3,#96
add r2,r3,#96
# qhasm: 4x zero = 0
# asm 1: vmov.i32 >zero=reg128#1,#0
# asm 2: vmov.i32 >zero=q0,#0
vmov.i32 q0,#0
# qhasm: new one
# qhasm: one = 0xff,one[1]
# asm 1: vmov.i64 <one=reg128#2%bot,#0xff
# asm 2: vmov.i64 <one=d2,#0xff
vmov.i64 d2,#0xff
# qhasm: one = one[0],0
# asm 1: vmov.i64 <one=reg128#2%top,#0
# asm 2: vmov.i64 <one=d3,#0
vmov.i64 d3,#0
# qhasm: 4x one unsigned>>= 7
# asm 1: vshr.u32 >one=reg128#2,<one=reg128#2,#7
# asm 2: vshr.u32 >one=q1,<one=q1,#7
vshr.u32 q1,q1,#7
# qhasm: mem128[posx] aligned= one;posx += 16
# asm 1: vst1.8 {<one=reg128#2%bot-<one=reg128#2%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<one=d2-<one=d3},[<posx=r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
# qhasm: mem128[posx] aligned= zero;posx += 16
# asm 1: vst1.8 {<zero=reg128#1%bot-<zero=reg128#1%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<zero=d0-<zero=d1},[<posx=r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
# qhasm: mem64[posx] aligned= zero[0]
# asm 1: vst1.8 <zero=reg128#1%bot,[<posx=int32#3,: 64]
# asm 2: vst1.8 <zero=d0,[<posx=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: posx = playground1_ptr + 144
# asm 1: add >posx=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >posx=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: 4x zero = 0
# asm 1: vmov.i32 >zero=reg128#1,#0
# asm 2: vmov.i32 >zero=q0,#0
vmov.i32 q0,#0
# qhasm: mem128[posx] aligned= zero;posx += 16
# asm 1: vst1.8 {<zero=reg128#1%bot-<zero=reg128#1%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<zero=d0-<zero=d1},[<posx=r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
# qhasm: mem128[posx] aligned= zero;posx += 16
# asm 1: vst1.8 {<zero=reg128#1%bot-<zero=reg128#1%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<zero=d0-<zero=d1},[<posx=r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
# qhasm: mem64[posx] aligned= zero[0]
# asm 1: vst1.8 <zero=reg128#1%bot,[<posx=int32#3,: 64]
# asm 2: vst1.8 <zero=d0,[<posx=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: posx = playground1_ptr + 240
# asm 1: add >posx=int32#3,<playground1_ptr=int32#4,#240
# asm 2: add >posx=r2,<playground1_ptr=r3,#240
add r2,r3,#240
# qhasm: 4x zero = 0
# asm 1: vmov.i32 >zero=reg128#1,#0
# asm 2: vmov.i32 >zero=q0,#0
vmov.i32 q0,#0
# qhasm: new one
# qhasm: one = 0xff,one[1]
# asm 1: vmov.i64 <one=reg128#2%bot,#0xff
# asm 2: vmov.i64 <one=d2,#0xff
vmov.i64 d2,#0xff
# qhasm: one = one[0],0
# asm 1: vmov.i64 <one=reg128#2%top,#0
# asm 2: vmov.i64 <one=d3,#0
vmov.i64 d3,#0
# qhasm: 4x one unsigned>>= 7
# asm 1: vshr.u32 >one=reg128#2,<one=reg128#2,#7
# asm 2: vshr.u32 >one=q1,<one=q1,#7
vshr.u32 q1,q1,#7
# qhasm: mem128[posx] aligned= one;posx += 16
# asm 1: vst1.8 {<one=reg128#2%bot-<one=reg128#2%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<one=d2-<one=d3},[<posx=r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
# qhasm: mem128[posx] aligned= zero;posx += 16
# asm 1: vst1.8 {<zero=reg128#1%bot-<zero=reg128#1%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<zero=d0-<zero=d1},[<posx=r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
# qhasm: mem64[posx] aligned= zero[0]
# asm 1: vst1.8 <zero=reg128#1%bot,[<posx=int32#3,: 64]
# asm 2: vst1.8 <zero=d0,[<posx=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: posy = playground1_ptr + 48
# asm 1: add >posy=int32#3,<playground1_ptr=int32#4,#48
# asm 2: add >posy=r2,<playground1_ptr=r3,#48
add r2,r3,#48
# qhasm: posx = playground1_ptr + 192
# asm 1: add >posx=int32#7,<playground1_ptr=int32#4,#192
# asm 2: add >posx=r6,<playground1_ptr=r3,#192
add r6,r3,#192
# qhasm: f0 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<posy=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: f4 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<posy=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: new f8
# qhasm: f8 aligned= mem64[posy] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<posy=int32#3,: 64]
# asm 2: vld1.8 {<f8=d4},[<posy=r2,: 64]
vld1.8 {d4},[r2,: 64]
# qhasm: mem128[posx] aligned= f0;posx += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<posx=int32#7,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<posx=r6,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
# qhasm: mem128[posx] aligned= f4;posx += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<posx=int32#7,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<posx=r6,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
# qhasm: mem64[posx] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<posx=int32#7,: 64]
# asm 2: vst1.8 <f8=d4,[<posx=r6,: 64]
vst1.8 d4,[r6,: 64]
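# note: ladder state initialized, in the usual Montgomery-ladder naming: x1 at playground1+48, X2 = 1 at +96, Z2 = 0 at +144, X3 = x1 at +192, Z3 = 1 at +240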
# qhasm: mainloop:
._mainloop:
# qhasm: pos8 = (pos unsigned>> 3)
# asm 1: mov >pos8=int32#3,<pos=int32#6,LSR #3
# asm 2: mov >pos8=r2,<pos=r5,LSR #3
mov r2,r5,LSR #3
# qhasm: pos7 = pos & 7
# asm 1: and >pos7=int32#7,<pos=int32#6,#7
# asm 2: and >pos7=r6,<pos=r5,#7
and r6,r5,#7
# qhasm: bit = mem8[eptr + pos8]
# asm 1: ldrb >bit=int32#3,[<eptr=int32#2,<pos8=int32#3]
# asm 2: ldrb >bit=r2,[<eptr=r1,<pos8=r2]
ldrb r2,[r1,r2]
# qhasm: bit unsigned>>= pos7
# asm 1: mov >bit=int32#3,<bit=int32#3,LSR <pos7=int32#7
# asm 2: mov >bit=r2,<bit=r2,LSR <pos7=r6
mov r2,r2,LSR r6
# qhasm: bit &= 1
# asm 1: and >bit=int32#3,<bit=int32#3,#1
# asm 2: and >bit=r2,<bit=r2,#1
and r2,r2,#1
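# note: bit = (e[pos>>3] >> (pos&7)) & 1; below, swap ^= bit decides whether this iteration swaps, and -swap yields an all-zero or all-one word for the constant-time mask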
# qhasm: pos_stack = pos
# asm 1: str <pos=int32#6,>pos_stack=stack32#3
# asm 2: str <pos=r5,>pos_stack=[sp,#488]
str r5,[sp,#488]
# qhasm: swap ^= bit
# asm 1: eor >swap=int32#5,<swap=int32#5,<bit=int32#3
# asm 2: eor >swap=r4,<swap=r4,<bit=r2
eor r4,r4,r2
# qhasm: swap_stack = bit
# asm 1: str <bit=int32#3,>swap_stack=stack32#4
# asm 2: str <bit=r2,>swap_stack=[sp,#492]
str r2,[sp,#492]
# qhasm: swap = -swap
# asm 1: neg >swap=int32#3,<swap=int32#5
# asm 2: neg >swap=r2,<swap=r4
neg r2,r4
# qhasm: new f8
# qhasm: new g8
# qhasm: new F8
# qhasm: new G8
# qhasm: pos0 = playground1_ptr + 96
# asm 1: add >pos0=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >pos0=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: pos1 = playground1_ptr + 192
# asm 1: add >pos1=int32#6,<playground1_ptr=int32#4,#192
# asm 2: add >pos1=r5,<playground1_ptr=r3,#192
add r5,r3,#192
# qhasm: pos2 = playground1_ptr + 144
# asm 1: add >pos2=int32#7,<playground1_ptr=int32#4,#144
# asm 2: add >pos2=r6,<playground1_ptr=r3,#144
add r6,r3,#144
# qhasm: f0 aligned= mem128[pos0];pos0 += 16
# asm 1: vld1.8 {>f0=reg128#5%bot->f0=reg128#5%top},[<pos0=int32#5,: 128]!
# asm 2: vld1.8 {>f0=d8->f0=d9},[<pos0=r4,: 128]!
vld1.8 {d8-d9},[r4,: 128]!
# qhasm: pos3 = playground1_ptr + 240
# asm 1: add >pos3=int32#8,<playground1_ptr=int32#4,#240
# asm 2: add >pos3=r7,<playground1_ptr=r3,#240
add r7,r3,#240
# qhasm: g0 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>g0=reg128#6%bot->g0=reg128#6%top},[<pos1=int32#6,: 128]!
# asm 2: vld1.8 {>g0=d10->g0=d11},[<pos1=r5,: 128]!
vld1.8 {d10-d11},[r5,: 128]!
# qhasm: x0 = f0 ^ g0
# asm 1: veor >x0=reg128#7,<f0=reg128#5,<g0=reg128#6
# asm 2: veor >x0=q6,<f0=q4,<g0=q5
veor q6,q4,q5
# qhasm: F0 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>F0=reg128#8%bot->F0=reg128#8%top},[<pos2=int32#7,: 128]!
# asm 2: vld1.8 {>F0=d14->F0=d15},[<pos2=r6,: 128]!
vld1.8 {d14-d15},[r6,: 128]!
# qhasm: b = swap,swap,swap,swap
# asm 1: vdup.i32 >b=reg128#9,<swap=int32#3
# asm 2: vdup.i32 >b=q8,<swap=r2
vdup.i32 q8,r2
# qhasm: G0 aligned= mem128[pos3];pos3 += 16
# asm 1: vld1.8 {>G0=reg128#10%bot->G0=reg128#10%top},[<pos3=int32#8,: 128]!
# asm 2: vld1.8 {>G0=d18->G0=d19},[<pos3=r7,: 128]!
vld1.8 {d18-d19},[r7,: 128]!
# qhasm: X0 = F0 ^ G0
# asm 1: veor >X0=reg128#11,<F0=reg128#8,<G0=reg128#10
# asm 2: veor >X0=q10,<F0=q7,<G0=q9
veor q10,q7,q9
# qhasm: f4 aligned= mem128[pos0];pos0 += 16
# asm 1: vld1.8 {>f4=reg128#12%bot->f4=reg128#12%top},[<pos0=int32#5,: 128]!
# asm 2: vld1.8 {>f4=d22->f4=d23},[<pos0=r4,: 128]!
vld1.8 {d22-d23},[r4,: 128]!
# qhasm: x0 &= b
# asm 1: vand >x0=reg128#7,<x0=reg128#7,<b=reg128#9
# asm 2: vand >x0=q6,<x0=q6,<b=q8
vand q6,q6,q8
# qhasm: g4 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>g4=reg128#13%bot->g4=reg128#13%top},[<pos1=int32#6,: 128]!
# asm 2: vld1.8 {>g4=d24->g4=d25},[<pos1=r5,: 128]!
vld1.8 {d24-d25},[r5,: 128]!
# qhasm: X0 &= b
# asm 1: vand >X0=reg128#11,<X0=reg128#11,<b=reg128#9
# asm 2: vand >X0=q10,<X0=q10,<b=q8
vand q10,q10,q8
# qhasm: F4 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>F4=reg128#14%bot->F4=reg128#14%top},[<pos2=int32#7,: 128]!
# asm 2: vld1.8 {>F4=d26->F4=d27},[<pos2=r6,: 128]!
vld1.8 {d26-d27},[r6,: 128]!
# qhasm: f0 ^= x0
# asm 1: veor >f0=reg128#5,<f0=reg128#5,<x0=reg128#7
# asm 2: veor >f0=q4,<f0=q4,<x0=q6
veor q4,q4,q6
# qhasm: G4 aligned= mem128[pos3];pos3 += 16
# asm 1: vld1.8 {>G4=reg128#15%bot->G4=reg128#15%top},[<pos3=int32#8,: 128]!
# asm 2: vld1.8 {>G4=d28->G4=d29},[<pos3=r7,: 128]!
vld1.8 {d28-d29},[r7,: 128]!
# qhasm: g0 ^= x0
# asm 1: veor >g0=reg128#6,<g0=reg128#6,<x0=reg128#7
# asm 2: veor >g0=q5,<g0=q5,<x0=q6
veor q5,q5,q6
# qhasm: f8 aligned= mem64[pos0] f8[1]
# asm 1: vld1.8 {<f8=reg128#1%bot},[<pos0=int32#5,: 64]
# asm 2: vld1.8 {<f8=d0},[<pos0=r4,: 64]
vld1.8 {d0},[r4,: 64]
# qhasm: F0 ^= X0
# asm 1: veor >F0=reg128#7,<F0=reg128#8,<X0=reg128#11
# asm 2: veor >F0=q6,<F0=q7,<X0=q10
veor q6,q7,q10
# qhasm: g8 aligned= mem64[pos1] g8[1]
# asm 1: vld1.8 {<g8=reg128#2%bot},[<pos1=int32#6,: 64]
# asm 2: vld1.8 {<g8=d2},[<pos1=r5,: 64]
vld1.8 {d2},[r5,: 64]
# qhasm: G0 ^= X0
# asm 1: veor >G0=reg128#8,<G0=reg128#10,<X0=reg128#11
# asm 2: veor >G0=q7,<G0=q9,<X0=q10
veor q7,q9,q10
# qhasm: F8 aligned= mem64[pos2] F8[1]
# asm 1: vld1.8 {<F8=reg128#3%bot},[<pos2=int32#7,: 64]
# asm 2: vld1.8 {<F8=d4},[<pos2=r6,: 64]
vld1.8 {d4},[r6,: 64]
# qhasm: x4 = f4 ^ g4
# asm 1: veor >x4=reg128#10,<f4=reg128#12,<g4=reg128#13
# asm 2: veor >x4=q9,<f4=q11,<g4=q12
veor q9,q11,q12
# qhasm: G8 aligned= mem64[pos3] G8[1]
# asm 1: vld1.8 {<G8=reg128#4%bot},[<pos3=int32#8,: 64]
# asm 2: vld1.8 {<G8=d6},[<pos3=r7,: 64]
vld1.8 {d6},[r7,: 64]
# qhasm: x8 = f8 ^ g8
# asm 1: veor >x8=reg128#11,<f8=reg128#1,<g8=reg128#2
# asm 2: veor >x8=q10,<f8=q0,<g8=q1
veor q10,q0,q1
# qhasm: pos0 -= 32
# asm 1: sub >pos0=int32#3,<pos0=int32#5,#32
# asm 2: sub >pos0=r2,<pos0=r4,#32
sub r2,r4,#32
# qhasm: x4 &= b
# asm 1: vand >x4=reg128#10,<x4=reg128#10,<b=reg128#9
# asm 2: vand >x4=q9,<x4=q9,<b=q8
vand q9,q9,q8
# qhasm: pos1 -= 32
# asm 1: sub >pos1=int32#5,<pos1=int32#6,#32
# asm 2: sub >pos1=r4,<pos1=r5,#32
sub r4,r5,#32
# qhasm: x8 &= b
# asm 1: vand >x8=reg128#11,<x8=reg128#11,<b=reg128#9
# asm 2: vand >x8=q10,<x8=q10,<b=q8
vand q10,q10,q8
# qhasm: pos2 -= 32
# asm 1: sub >pos2=int32#6,<pos2=int32#7,#32
# asm 2: sub >pos2=r5,<pos2=r6,#32
sub r5,r6,#32
# qhasm: f4 ^= x4
# asm 1: veor >f4=reg128#12,<f4=reg128#12,<x4=reg128#10
# asm 2: veor >f4=q11,<f4=q11,<x4=q9
veor q11,q11,q9
# qhasm: pos3 -= 32
# asm 1: sub >pos3=int32#7,<pos3=int32#8,#32
# asm 2: sub >pos3=r6,<pos3=r7,#32
sub r6,r7,#32
# qhasm: f8 ^= x8
# asm 1: veor >f8=reg128#1,<f8=reg128#1,<x8=reg128#11
# asm 2: veor >f8=q0,<f8=q0,<x8=q10
veor q0,q0,q10
# qhasm: g4 ^= x4
# asm 1: veor >g4=reg128#10,<g4=reg128#13,<x4=reg128#10
# asm 2: veor >g4=q9,<g4=q12,<x4=q9
veor q9,q12,q9
# qhasm: g8 ^= x8
# asm 1: veor >g8=reg128#2,<g8=reg128#2,<x8=reg128#11
# asm 2: veor >g8=q1,<g8=q1,<x8=q10
veor q1,q1,q10
# qhasm: X4 = F4 ^ G4
# asm 1: veor >X4=reg128#11,<F4=reg128#14,<G4=reg128#15
# asm 2: veor >X4=q10,<F4=q13,<G4=q14
veor q10,q13,q14
# qhasm: X8 = F8 ^ G8
# asm 1: veor >X8=reg128#13,<F8=reg128#3,<G8=reg128#4
# asm 2: veor >X8=q12,<F8=q2,<G8=q3
veor q12,q2,q3
# qhasm: X4 &= b
# asm 1: vand >X4=reg128#11,<X4=reg128#11,<b=reg128#9
# asm 2: vand >X4=q10,<X4=q10,<b=q8
vand q10,q10,q8
# qhasm: X8 &= b
# asm 1: vand >X8=reg128#9,<X8=reg128#13,<b=reg128#9
# asm 2: vand >X8=q8,<X8=q12,<b=q8
vand q8,q12,q8
# qhasm: F4 ^= X4
# asm 1: veor >F4=reg128#13,<F4=reg128#14,<X4=reg128#11
# asm 2: veor >F4=q12,<F4=q13,<X4=q10
veor q12,q13,q10
# qhasm: F8 ^= X8
# asm 1: veor >F8=reg128#3,<F8=reg128#3,<X8=reg128#9
# asm 2: veor >F8=q2,<F8=q2,<X8=q8
veor q2,q2,q8
# qhasm: G4 ^= X4
# asm 1: veor >G4=reg128#11,<G4=reg128#15,<X4=reg128#11
# asm 2: veor >G4=q10,<G4=q14,<X4=q10
veor q10,q14,q10
# qhasm: G8 ^= X8
# asm 1: veor >G8=reg128#4,<G8=reg128#4,<X8=reg128#9
# asm 2: veor >G8=q3,<G8=q3,<X8=q8
veor q3,q3,q8
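# note: constant-time conditional swap done via x = (f^g) & b; f ^= x; g ^= x, exchanging the two working pairs only when the mask b is all-ones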
# qhasm: 4x f0plusF0 = f0 + F0
# asm 1: vadd.i32 >f0plusF0=reg128#9,<f0=reg128#5,<F0=reg128#7
# asm 2: vadd.i32 >f0plusF0=q8,<f0=q4,<F0=q6
vadd.i32 q8,q4,q6
# qhasm: 4x f0minusF0 = f0 - F0
# asm 1: vsub.i32 >f0minusF0=reg128#5,<f0=reg128#5,<F0=reg128#7
# asm 2: vsub.i32 >f0minusF0=q4,<f0=q4,<F0=q6
vsub.i32 q4,q4,q6
# qhasm: mem128[pos0] aligned= f0plusF0;pos0 += 16
# asm 1: vst1.8 {<f0plusF0=reg128#9%bot-<f0plusF0=reg128#9%top},[<pos0=int32#3,: 128]!
# asm 2: vst1.8 {<f0plusF0=d16-<f0plusF0=d17},[<pos0=r2,: 128]!
vst1.8 {d16-d17},[r2,: 128]!
# qhasm: 4x f4plusF4 = f4 + F4
# asm 1: vadd.i32 >f4plusF4=reg128#7,<f4=reg128#12,<F4=reg128#13
# asm 2: vadd.i32 >f4plusF4=q6,<f4=q11,<F4=q12
vadd.i32 q6,q11,q12
# qhasm: mem128[pos2] aligned= f0minusF0;pos2 += 16
# asm 1: vst1.8 {<f0minusF0=reg128#5%bot-<f0minusF0=reg128#5%top},[<pos2=int32#6,: 128]!
# asm 2: vst1.8 {<f0minusF0=d8-<f0minusF0=d9},[<pos2=r5,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
# qhasm: 4x f4minusF4 = f4 - F4
# asm 1: vsub.i32 >f4minusF4=reg128#5,<f4=reg128#12,<F4=reg128#13
# asm 2: vsub.i32 >f4minusF4=q4,<f4=q11,<F4=q12
vsub.i32 q4,q11,q12
# qhasm: mem128[pos0] aligned= f4plusF4;pos0 += 16
# asm 1: vst1.8 {<f4plusF4=reg128#7%bot-<f4plusF4=reg128#7%top},[<pos0=int32#3,: 128]!
# asm 2: vst1.8 {<f4plusF4=d12-<f4plusF4=d13},[<pos0=r2,: 128]!
vst1.8 {d12-d13},[r2,: 128]!
# qhasm: 4x f8plusF8 = f8 + F8
# asm 1: vadd.i32 >f8plusF8=reg128#7,<f8=reg128#1,<F8=reg128#3
# asm 2: vadd.i32 >f8plusF8=q6,<f8=q0,<F8=q2
vadd.i32 q6,q0,q2
# qhasm: mem128[pos2] aligned= f4minusF4;pos2 += 16
# asm 1: vst1.8 {<f4minusF4=reg128#5%bot-<f4minusF4=reg128#5%top},[<pos2=int32#6,: 128]!
# asm 2: vst1.8 {<f4minusF4=d8-<f4minusF4=d9},[<pos2=r5,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
# qhasm: 4x f8minusF8 = f8 - F8
# asm 1: vsub.i32 >f8minusF8=reg128#1,<f8=reg128#1,<F8=reg128#3
# asm 2: vsub.i32 >f8minusF8=q0,<f8=q0,<F8=q2
vsub.i32 q0,q0,q2
# qhasm: mem64[pos0] aligned= f8plusF8[0]
# asm 1: vst1.8 <f8plusF8=reg128#7%bot,[<pos0=int32#3,: 64]
# asm 2: vst1.8 <f8plusF8=d12,[<pos0=r2,: 64]
vst1.8 d12,[r2,: 64]
# qhasm: 4x g0plusG0 = g0 + G0
# asm 1: vadd.i32 >g0plusG0=reg128#3,<g0=reg128#6,<G0=reg128#8
# asm 2: vadd.i32 >g0plusG0=q2,<g0=q5,<G0=q7
vadd.i32 q2,q5,q7
# qhasm: mem64[pos2] aligned= f8minusF8[0]
# asm 1: vst1.8 <f8minusF8=reg128#1%bot,[<pos2=int32#6,: 64]
# asm 2: vst1.8 <f8minusF8=d0,[<pos2=r5,: 64]
vst1.8 d0,[r5,: 64]
# qhasm: 4x g0minusG0 = g0 - G0
# asm 1: vsub.i32 >g0minusG0=reg128#1,<g0=reg128#6,<G0=reg128#8
# asm 2: vsub.i32 >g0minusG0=q0,<g0=q5,<G0=q7
vsub.i32 q0,q5,q7
# qhasm: mem128[pos1] aligned= g0plusG0;pos1 += 16
# asm 1: vst1.8 {<g0plusG0=reg128#3%bot-<g0plusG0=reg128#3%top},[<pos1=int32#5,: 128]!
# asm 2: vst1.8 {<g0plusG0=d4-<g0plusG0=d5},[<pos1=r4,: 128]!
vst1.8 {d4-d5},[r4,: 128]!
# qhasm: 4x g4plusG4 = g4 + G4
# asm 1: vadd.i32 >g4plusG4=reg128#3,<g4=reg128#10,<G4=reg128#11
# asm 2: vadd.i32 >g4plusG4=q2,<g4=q9,<G4=q10
vadd.i32 q2,q9,q10
# qhasm: mem128[pos3] aligned= g0minusG0;pos3 += 16
# asm 1: vst1.8 {<g0minusG0=reg128#1%bot-<g0minusG0=reg128#1%top},[<pos3=int32#7,: 128]!
# asm 2: vst1.8 {<g0minusG0=d0-<g0minusG0=d1},[<pos3=r6,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
# qhasm: 4x g4minusG4 = g4 - G4
# asm 1: vsub.i32 >g4minusG4=reg128#1,<g4=reg128#10,<G4=reg128#11
# asm 2: vsub.i32 >g4minusG4=q0,<g4=q9,<G4=q10
vsub.i32 q0,q9,q10
# qhasm: mem128[pos1] aligned= g4plusG4;pos1 += 16
# asm 1: vst1.8 {<g4plusG4=reg128#3%bot-<g4plusG4=reg128#3%top},[<pos1=int32#5,: 128]!
# asm 2: vst1.8 {<g4plusG4=d4-<g4plusG4=d5},[<pos1=r4,: 128]!
vst1.8 {d4-d5},[r4,: 128]!
# qhasm: 4x g8plusG8 = g8 + G8
# asm 1: vadd.i32 >g8plusG8=reg128#3,<g8=reg128#2,<G8=reg128#4
# asm 2: vadd.i32 >g8plusG8=q2,<g8=q1,<G8=q3
vadd.i32 q2,q1,q3
# qhasm: mem128[pos3] aligned= g4minusG4;pos3 += 16
# asm 1: vst1.8 {<g4minusG4=reg128#1%bot-<g4minusG4=reg128#1%top},[<pos3=int32#7,: 128]!
# asm 2: vst1.8 {<g4minusG4=d0-<g4minusG4=d1},[<pos3=r6,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
# qhasm: 4x g8minusG8 = g8 - G8
# asm 1: vsub.i32 >g8minusG8=reg128#1,<g8=reg128#2,<G8=reg128#4
# asm 2: vsub.i32 >g8minusG8=q0,<g8=q1,<G8=q3
vsub.i32 q0,q1,q3
# qhasm: mem64[pos1] aligned= g8plusG8[0]
# asm 1: vst1.8 <g8plusG8=reg128#3%bot,[<pos1=int32#5,: 64]
# asm 2: vst1.8 <g8plusG8=d4,[<pos1=r4,: 64]
vst1.8 d4,[r4,: 64]
# qhasm: mem64[pos3] aligned= g8minusG8[0]
# asm 1: vst1.8 <g8minusG8=reg128#1%bot,[<pos3=int32#7,: 64]
# asm 2: vst1.8 <g8minusG8=d0,[<pos3=r6,: 64]
vst1.8 d0,[r6,: 64]
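# note: the ladder step's sums and differences (X2+Z2, X2-Z2, X3+Z3, X3-Z3) are written back over the four state blocks at offsets 96, 144, 192, 240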
# qhasm: ptr = &_19_19_38_38_stack
# asm 1: lea >ptr=int32#3,<_19_19_38_38_stack=stack128#3
# asm 2: lea >ptr=r2,<_19_19_38_38_stack=[sp,#544]
add r2,sp,#544
# qhasm: posf = playground1_ptr + 96
# asm 1: add >posf=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >posf=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: posF = playground1_ptr + 144
# asm 1: add >posF=int32#6,<playground1_ptr=int32#4,#144
# asm 2: add >posF=r5,<playground1_ptr=r3,#144
add r5,r3,#144
# qhasm: _19_19_38_38 aligned= mem128[ptr]
# asm 1: vld1.8 {>_19_19_38_38=reg128#1%bot->_19_19_38_38=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_19_19_38_38=d0->_19_19_38_38=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
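# note: two-way vectorized squaring of the elements at offsets 96 and 144: vzip interleaves their limbs so each vmull/vmlal lane accumulates one element's partial products; the *_19_38 copies pre-scale high limbs by 19 or 38 for the mod 2^255-19 fold, and the *_2 copies carry the doubled cross terms of the 26/25-bit radix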
# qhasm: fg01 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>fg01=reg128#2%bot->fg01=reg128#2%top},[<posf=int32#5,: 128]!
# asm 2: vld1.8 {>fg01=d2->fg01=d3},[<posf=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: fg23 aligned= mem128[posF];posF+=16
# asm 1: vld1.8 {>fg23=reg128#3%bot->fg23=reg128#3%top},[<posF=int32#6,: 128]!
# asm 2: vld1.8 {>fg23=d4->fg23=d5},[<posF=r5,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
# qhasm: fg01[0,1,2,3] fg23[0,1,2,3] = fg01[0]fg23[0]fg01[1]fg23[1] fg01[2]fg23[2]fg01[3]fg23[3]
# asm 1: vzip.i32 <fg01=reg128#2,<fg23=reg128#3
# asm 2: vzip.i32 <fg01=q1,<fg23=q2
vzip.i32 q1,q2
# qhasm: fg45 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>fg45=reg128#4%bot->fg45=reg128#4%top},[<posf=int32#5,: 128]!
# asm 2: vld1.8 {>fg45=d6->fg45=d7},[<posf=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: fg67 aligned= mem128[posF];posF+=16
# asm 1: vld1.8 {>fg67=reg128#5%bot->fg67=reg128#5%top},[<posF=int32#6,: 128]!
# asm 2: vld1.8 {>fg67=d8->fg67=d9},[<posF=r5,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
# qhasm: 4x fg01_2 = fg01 << 1
# asm 1: vshl.i32 >fg01_2=reg128#6,<fg01=reg128#2,#1
# asm 2: vshl.i32 >fg01_2=q5,<fg01=q1,#1
vshl.i32 q5,q1,#1
# qhasm: fg45[0,1,2,3] fg67[0,1,2,3] = fg45[0]fg67[0]fg45[1]fg67[1] fg45[2]fg67[2]fg45[3]fg67[3]
# asm 1: vzip.i32 <fg45=reg128#4,<fg67=reg128#5
# asm 2: vzip.i32 <fg45=q3,<fg67=q4
vzip.i32 q3,q4
# qhasm: 4x fg23_2 = fg23 << 1
# asm 1: vshl.i32 >fg23_2=reg128#7,<fg23=reg128#3,#1
# asm 2: vshl.i32 >fg23_2=q6,<fg23=q2,#1
vshl.i32 q6,q2,#1
# qhasm: new fg89
# qhasm: fg89 aligned= mem64[posf]fg89[1]
# asm 1: vld1.8 {<fg89=reg128#8%bot},[<posf=int32#5,: 64]
# asm 2: vld1.8 {<fg89=d14},[<posf=r4,: 64]
vld1.8 {d14},[r4,: 64]
# qhasm: 4x fg45_2 = fg45 << 1
# asm 1: vshl.i32 >fg45_2=reg128#9,<fg45=reg128#4,#1
# asm 2: vshl.i32 >fg45_2=q8,<fg45=q3,#1
vshl.i32 q8,q3,#1
# qhasm: fg89 aligned= fg89[0]mem64[posF]
# asm 1: vld1.8 {<fg89=reg128#8%top},[<posF=int32#6,: 64]
# asm 2: vld1.8 {<fg89=d15},[<posF=r5,: 64]
vld1.8 {d15},[r5,: 64]
# qhasm: 4x fg67_2 = fg67 << 1
# asm 1: vshl.i32 >fg67_2=reg128#10,<fg67=reg128#5,#1
# asm 2: vshl.i32 >fg67_2=q9,<fg67=q4,#1
vshl.i32 q9,q4,#1
# qhasm: fg45_19_38[0,1] = fg45_19_38[0,1];fg45_19_38[2] = fg45[2] * _19_19_38_38[2];fg45_19_38[3] = fg45[3] * _19_19_38_38[3]
# asm 1: vmul.i32 >fg45_19_38=reg128#11%top,<fg45=reg128#4%top,<_19_19_38_38=reg128#1%top
# asm 2: vmul.i32 >fg45_19_38=d21,<fg45=d7,<_19_19_38_38=d1
vmul.i32 d21,d7,d1
# qhasm: fg89 = fg89[0,2,1,3]
# asm 1: vtrn.32 <fg89=reg128#8%bot,<fg89=reg128#8%top
# asm 2: vtrn.32 <fg89=d14,<fg89=d15
vtrn.32 d14,d15
# qhasm: 4x fg67_19_38 = fg67 * _19_19_38_38
# asm 1: vmul.i32 >fg67_19_38=reg128#12,<fg67=reg128#5,<_19_19_38_38=reg128#1
# asm 2: vmul.i32 >fg67_19_38=q11,<fg67=q4,<_19_19_38_38=q0
vmul.i32 q11,q4,q0
# qhasm: 4x fg89_19_38 = fg89 * _19_19_38_38
# asm 1: vmul.i32 >fg89_19_38=reg128#1,<fg89=reg128#8,<_19_19_38_38=reg128#1
# asm 2: vmul.i32 >fg89_19_38=q0,<fg89=q7,<_19_19_38_38=q0
vmul.i32 q0,q7,q0
# qhasm: h0[0,1] = fg01[0] signed* fg01[0];h0[2,3] = fg01[1] signed* fg01[1]
# asm 1: vmull.s32 >h0=reg128#13,<fg01=reg128#2%bot,<fg01=reg128#2%bot
# asm 2: vmull.s32 >h0=q12,<fg01=d2,<fg01=d2
vmull.s32 q12,d2,d2
# qhasm: h0[0,1] += fg01_2[2] signed* fg89_19_38[2];h0[2,3] += fg01_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg01_2=reg128#6%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h0=q12,<fg01_2=d11,<fg89_19_38=d1
vmlal.s32 q12,d11,d1
# qhasm: h0[0,1] += fg23_2[0] signed* fg89_19_38[0];h0[2,3] += fg23_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h0=reg128#13,<fg23_2=reg128#7%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h0=q12,<fg23_2=d12,<fg89_19_38=d0
vmlal.s32 q12,d12,d0
# qhasm: h0[0,1] += fg23_2[2] signed* fg67_19_38[2];h0[2,3] += fg23_2[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg23_2=reg128#7%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h0=q12,<fg23_2=d13,<fg67_19_38=d23
vmlal.s32 q12,d13,d23
# qhasm: h0[0,1] += fg45_2[0] signed* fg67_19_38[0];h0[2,3] += fg45_2[1] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h0=reg128#13,<fg45_2=reg128#9%bot,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h0=q12,<fg45_2=d16,<fg67_19_38=d22
vmlal.s32 q12,d16,d22
# qhasm: h0[0,1] += fg45[2] signed* fg45_19_38[2];h0[2,3] += fg45[3] signed* fg45_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg45=reg128#4%top,<fg45_19_38=reg128#11%top
# asm 2: vmlal.s32 <h0=q12,<fg45=d7,<fg45_19_38=d21
vmlal.s32 q12,d7,d21
# qhasm: h1[0,1] = fg01[0] signed* fg01_2[2];h1[2,3] = fg01[1] signed* fg01_2[3]
# asm 1: vmull.s32 >h1=reg128#11,<fg01=reg128#2%bot,<fg01_2=reg128#6%top
# asm 2: vmull.s32 >h1=q10,<fg01=d2,<fg01_2=d11
vmull.s32 q10,d2,d11
# qhasm: h1[0,1] += fg23[0] signed* fg89_19_38[2];h1[2,3] += fg23[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h1=reg128#11,<fg23=reg128#3%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h1=q10,<fg23=d4,<fg89_19_38=d1
vmlal.s32 q10,d4,d1
# qhasm: h1[0,1] += fg23_2[2] signed* fg89_19_38[0];h1[2,3] += fg23_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h1=reg128#11,<fg23_2=reg128#7%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h1=q10,<fg23_2=d13,<fg89_19_38=d0
vmlal.s32 q10,d13,d0
# qhasm: h1[0,1] += fg45[0] signed* fg67_19_38[2];h1[2,3] += fg45[1] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h1=reg128#11,<fg45=reg128#4%bot,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h1=q10,<fg45=d6,<fg67_19_38=d23
vmlal.s32 q10,d6,d23
# qhasm: h1[0,1] += fg45_2[2] signed* fg67_19_38[0];h1[2,3] += fg45_2[3] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h1=reg128#11,<fg45_2=reg128#9%top,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h1=q10,<fg45_2=d17,<fg67_19_38=d22
vmlal.s32 q10,d17,d22
# qhasm: h2[0,1] = fg01_2[0] signed* fg23[0];h2[2,3] = fg01_2[1] signed* fg23[1]
# asm 1: vmull.s32 >h2=reg128#14,<fg01_2=reg128#6%bot,<fg23=reg128#3%bot
# asm 2: vmull.s32 >h2=q13,<fg01_2=d10,<fg23=d4
vmull.s32 q13,d10,d4
# qhasm: h2[0,1] += fg01_2[2] signed* fg01[2];h2[2,3] += fg01_2[3] signed* fg01[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg01_2=reg128#6%top,<fg01=reg128#2%top
# asm 2: vmlal.s32 <h2=q13,<fg01_2=d11,<fg01=d3
vmlal.s32 q13,d11,d3
# qhasm: h2[0,1] += fg23_2[2] signed* fg89_19_38[2];h2[2,3] += fg23_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg23_2=reg128#7%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h2=q13,<fg23_2=d13,<fg89_19_38=d1
vmlal.s32 q13,d13,d1
# qhasm: h2[0,1] += fg45_2[0] signed* fg89_19_38[0];h2[2,3] += fg45_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h2=reg128#14,<fg45_2=reg128#9%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h2=q13,<fg45_2=d16,<fg89_19_38=d0
vmlal.s32 q13,d16,d0
# qhasm: h2[0,1] += fg45_2[2] signed* fg67_19_38[2];h2[2,3] += fg45_2[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg45_2=reg128#9%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h2=q13,<fg45_2=d17,<fg67_19_38=d23
vmlal.s32 q13,d17,d23
# qhasm: h2[0,1] += fg67[0] signed* fg67_19_38[0];h2[2,3] += fg67[1] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h2=reg128#14,<fg67=reg128#5%bot,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h2=q13,<fg67=d8,<fg67_19_38=d22
vmlal.s32 q13,d8,d22
# qhasm: h3[0,1] = fg01_2[0] signed* fg23[2];h3[2,3] = fg01_2[1] signed* fg23[3]
# asm 1: vmull.s32 >h3=reg128#2,<fg01_2=reg128#6%bot,<fg23=reg128#3%top
# asm 2: vmull.s32 >h3=q1,<fg01_2=d10,<fg23=d5
vmull.s32 q1,d10,d5
# qhasm: h3[0,1] += fg01_2[2] signed* fg23[0];h3[2,3] += fg01_2[3] signed* fg23[1]
# asm 1: vmlal.s32 <h3=reg128#2,<fg01_2=reg128#6%top,<fg23=reg128#3%bot
# asm 2: vmlal.s32 <h3=q1,<fg01_2=d11,<fg23=d4
vmlal.s32 q1,d11,d4
# qhasm: h3[0,1] += fg45[0] signed* fg89_19_38[2];h3[2,3] += fg45[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h3=reg128#2,<fg45=reg128#4%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h3=q1,<fg45=d6,<fg89_19_38=d1
vmlal.s32 q1,d6,d1
# qhasm: h3[0,1] += fg45_2[2] signed* fg89_19_38[0];h3[2,3] += fg45_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h3=reg128#2,<fg45_2=reg128#9%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h3=q1,<fg45_2=d17,<fg89_19_38=d0
vmlal.s32 q1,d17,d0
# qhasm: h3[0,1] += fg67[0] signed* fg67_19_38[2];h3[2,3] += fg67[1] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h3=reg128#2,<fg67=reg128#5%bot,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h3=q1,<fg67=d8,<fg67_19_38=d23
vmlal.s32 q1,d8,d23
# qhasm: h4[0,1] = fg01_2[0] signed* fg45[0];h4[2,3] = fg01_2[1] signed* fg45[1]
# asm 1: vmull.s32 >h4=reg128#15,<fg01_2=reg128#6%bot,<fg45=reg128#4%bot
# asm 2: vmull.s32 >h4=q14,<fg01_2=d10,<fg45=d6
vmull.s32 q14,d10,d6
# qhasm: h4[0,1] += fg01_2[2] signed* fg23_2[2];h4[2,3] += fg01_2[3] signed* fg23_2[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg01_2=reg128#6%top,<fg23_2=reg128#7%top
# asm 2: vmlal.s32 <h4=q14,<fg01_2=d11,<fg23_2=d13
vmlal.s32 q14,d11,d13
# qhasm: h4[0,1] += fg23[0] signed* fg23[0];h4[2,3] += fg23[1] signed* fg23[1]
# asm 1: vmlal.s32 <h4=reg128#15,<fg23=reg128#3%bot,<fg23=reg128#3%bot
# asm 2: vmlal.s32 <h4=q14,<fg23=d4,<fg23=d4
vmlal.s32 q14,d4,d4
# qhasm: h4[0,1] += fg45_2[2] signed* fg89_19_38[2];h4[2,3] += fg45_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg45_2=reg128#9%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h4=q14,<fg45_2=d17,<fg89_19_38=d1
vmlal.s32 q14,d17,d1
# qhasm: h4[0,1] += fg67_2[0] signed* fg89_19_38[0];h4[2,3] += fg67_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h4=reg128#15,<fg67_2=reg128#10%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h4=q14,<fg67_2=d18,<fg89_19_38=d0
vmlal.s32 q14,d18,d0
# qhasm: h4[0,1] += fg67[2] signed* fg67_19_38[2];h4[2,3] += fg67[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg67=reg128#5%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h4=q14,<fg67=d9,<fg67_19_38=d23
vmlal.s32 q14,d9,d23
# qhasm: h5[0,1] = fg01_2[0] signed* fg45[2];h5[2,3] = fg01_2[1] signed* fg45[3]
# asm 1: vmull.s32 >h5=reg128#12,<fg01_2=reg128#6%bot,<fg45=reg128#4%top
# asm 2: vmull.s32 >h5=q11,<fg01_2=d10,<fg45=d7
vmull.s32 q11,d10,d7
# qhasm: h5[0,1] += fg01_2[2] signed* fg45[0];h5[2,3] += fg01_2[3] signed* fg45[1]
# asm 1: vmlal.s32 <h5=reg128#12,<fg01_2=reg128#6%top,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h5=q11,<fg01_2=d11,<fg45=d6
vmlal.s32 q11,d11,d6
# qhasm: h5[0,1] += fg23_2[0] signed* fg23[2];h5[2,3] += fg23_2[1] signed* fg23[3]
# asm 1: vmlal.s32 <h5=reg128#12,<fg23_2=reg128#7%bot,<fg23=reg128#3%top
# asm 2: vmlal.s32 <h5=q11,<fg23_2=d12,<fg23=d5
vmlal.s32 q11,d12,d5
# qhasm: h5[0,1] += fg67[0] signed* fg89_19_38[2];h5[2,3] += fg67[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h5=reg128#12,<fg67=reg128#5%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h5=q11,<fg67=d8,<fg89_19_38=d1
vmlal.s32 q11,d8,d1
# qhasm: h5[0,1] += fg67_2[2] signed* fg89_19_38[0];h5[2,3] += fg67_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h5=reg128#12,<fg67_2=reg128#10%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h5=q11,<fg67_2=d19,<fg89_19_38=d0
vmlal.s32 q11,d19,d0
# qhasm: h6[0,1] = fg01_2[0] signed* fg67[0];h6[2,3] = fg01_2[1] signed* fg67[1]
# asm 1: vmull.s32 >h6=reg128#16,<fg01_2=reg128#6%bot,<fg67=reg128#5%bot
# asm 2: vmull.s32 >h6=q15,<fg01_2=d10,<fg67=d8
vmull.s32 q15,d10,d8
# qhasm: h6[0,1] += fg01_2[2] signed* fg45_2[2];h6[2,3] += fg01_2[3] signed* fg45_2[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg01_2=reg128#6%top,<fg45_2=reg128#9%top
# asm 2: vmlal.s32 <h6=q15,<fg01_2=d11,<fg45_2=d17
vmlal.s32 q15,d11,d17
# qhasm: h6[0,1] += fg23_2[0] signed* fg45[0];h6[2,3] += fg23_2[1] signed* fg45[1]
# asm 1: vmlal.s32 <h6=reg128#16,<fg23_2=reg128#7%bot,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h6=q15,<fg23_2=d12,<fg45=d6
vmlal.s32 q15,d12,d6
# qhasm: h6[0,1] += fg23_2[2] signed* fg23[2];h6[2,3] += fg23_2[3] signed* fg23[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg23_2=reg128#7%top,<fg23=reg128#3%top
# asm 2: vmlal.s32 <h6=q15,<fg23_2=d13,<fg23=d5
vmlal.s32 q15,d13,d5
# qhasm: h6[0,1] += fg67_2[2] signed* fg89_19_38[2];h6[2,3] += fg67_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg67_2=reg128#10%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h6=q15,<fg67_2=d19,<fg89_19_38=d1
vmlal.s32 q15,d19,d1
# qhasm: h6[0,1] += fg89[0] signed* fg89_19_38[0];h6[2,3] += fg89[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h6=reg128#16,<fg89=reg128#8%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h6=q15,<fg89=d14,<fg89_19_38=d0
vmlal.s32 q15,d14,d0
# qhasm: h7[0,1] = fg01_2[0] signed* fg67[2];h7[2,3] = fg01_2[1] signed* fg67[3]
# asm 1: vmull.s32 >h7=reg128#3,<fg01_2=reg128#6%bot,<fg67=reg128#5%top
# asm 2: vmull.s32 >h7=q2,<fg01_2=d10,<fg67=d9
vmull.s32 q2,d10,d9
# qhasm: h7[0,1] += fg01_2[2] signed* fg67[0];h7[2,3] += fg01_2[3] signed* fg67[1]
# asm 1: vmlal.s32 <h7=reg128#3,<fg01_2=reg128#6%top,<fg67=reg128#5%bot
# asm 2: vmlal.s32 <h7=q2,<fg01_2=d11,<fg67=d8
vmlal.s32 q2,d11,d8
# qhasm: h7[0,1] += fg23_2[0] signed* fg45[2];h7[2,3] += fg23_2[1] signed* fg45[3]
# asm 1: vmlal.s32 <h7=reg128#3,<fg23_2=reg128#7%bot,<fg45=reg128#4%top
# asm 2: vmlal.s32 <h7=q2,<fg23_2=d12,<fg45=d7
vmlal.s32 q2,d12,d7
# qhasm: h7[0,1] += fg23_2[2] signed* fg45[0];h7[2,3] += fg23_2[3] signed* fg45[1]
# asm 1: vmlal.s32 <h7=reg128#3,<fg23_2=reg128#7%top,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h7=q2,<fg23_2=d13,<fg45=d6
vmlal.s32 q2,d13,d6
# qhasm: h7[0,1] += fg89[0] signed* fg89_19_38[2];h7[2,3] += fg89[1] signed* fg89_19_38[3]
  1639. # asm 1: vmlal.s32 <h7=reg128#3,<fg89=reg128#8%bot,<fg89_19_38=reg128#1%top
  1640. # asm 2: vmlal.s32 <h7=q2,<fg89=d14,<fg89_19_38=d1
  1641. vmlal.s32 q2,d14,d1
  1642. # qhasm: h8[0,1] = fg89[2] signed* fg89_19_38[2];h8[2,3] = fg89[3] signed* fg89_19_38[3]
  1643. # asm 1: vmull.s32 >h8=reg128#1,<fg89=reg128#8%top,<fg89_19_38=reg128#1%top
  1644. # asm 2: vmull.s32 >h8=q0,<fg89=d15,<fg89_19_38=d1
  1645. vmull.s32 q0,d15,d1
  1646. # qhasm: h8[0,1] += fg01_2[0] signed* fg89[0];h8[2,3] += fg01_2[1] signed* fg89[1]
  1647. # asm 1: vmlal.s32 <h8=reg128#1,<fg01_2=reg128#6%bot,<fg89=reg128#8%bot
  1648. # asm 2: vmlal.s32 <h8=q0,<fg01_2=d10,<fg89=d14
  1649. vmlal.s32 q0,d10,d14
  1650. # qhasm: h8[0,1] += fg01_2[2] signed* fg67_2[2];h8[2,3] += fg01_2[3] signed* fg67_2[3]
  1651. # asm 1: vmlal.s32 <h8=reg128#1,<fg01_2=reg128#6%top,<fg67_2=reg128#10%top
  1652. # asm 2: vmlal.s32 <h8=q0,<fg01_2=d11,<fg67_2=d19
  1653. vmlal.s32 q0,d11,d19
  1654. # qhasm: h8[0,1] += fg23_2[0] signed* fg67[0];h8[2,3] += fg23_2[1] signed* fg67[1]
  1655. # asm 1: vmlal.s32 <h8=reg128#1,<fg23_2=reg128#7%bot,<fg67=reg128#5%bot
  1656. # asm 2: vmlal.s32 <h8=q0,<fg23_2=d12,<fg67=d8
  1657. vmlal.s32 q0,d12,d8
  1658. # qhasm: h8[0,1] += fg23_2[2] signed* fg45_2[2];h8[2,3] += fg23_2[3] signed* fg45_2[3]
  1659. # asm 1: vmlal.s32 <h8=reg128#1,<fg23_2=reg128#7%top,<fg45_2=reg128#9%top
  1660. # asm 2: vmlal.s32 <h8=q0,<fg23_2=d13,<fg45_2=d17
  1661. vmlal.s32 q0,d13,d17
  1662. # qhasm: h8[0,1] += fg45[0] signed* fg45[0];h8[2,3] += fg45[1] signed* fg45[1]
  1663. # asm 1: vmlal.s32 <h8=reg128#1,<fg45=reg128#4%bot,<fg45=reg128#4%bot
  1664. # asm 2: vmlal.s32 <h8=q0,<fg45=d6,<fg45=d6
  1665. vmlal.s32 q0,d6,d6
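# note: the reduction constants are loaded next, interleaved with the
# last accumulator h9: _0x2000000 = 2^25 and _0x1000000 = 2^24.
# Adding half the limb range before the signed shift (by 26 for the
# even, 26-bit limbs; by 25 for the odd, 25-bit limbs) turns each
# carry into a rounded rather than floored division, keeping the
# reduced limbs centered around 0.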
# qhasm: ptr = &_0x2000000_stack
# asm 1: lea >ptr=int32#3,<_0x2000000_stack=stack128#1
# asm 2: lea >ptr=r2,<_0x2000000_stack=[sp,#512]
add r2,sp,#512
# qhasm: _0x2000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x2000000=reg128#10%bot->_0x2000000=reg128#10%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x2000000=d18->_0x2000000=d19},[<ptr=r2,: 128]
vld1.8 {d18-d19},[r2,: 128]
# qhasm: h9[0,1] = fg45_2[0] signed* fg45[2];h9[2,3] = fg45_2[1] signed* fg45[3]
# asm 1: vmull.s32 >h9=reg128#4,<fg45_2=reg128#9%bot,<fg45=reg128#4%top
# asm 2: vmull.s32 >h9=q3,<fg45_2=d16,<fg45=d7
vmull.s32 q3,d16,d7
# qhasm: h9[0,1] += fg01_2[0] signed* fg89[2];h9[2,3] += fg01_2[1] signed* fg89[3]
# asm 1: vmlal.s32 <h9=reg128#4,<fg01_2=reg128#6%bot,<fg89=reg128#8%top
# asm 2: vmlal.s32 <h9=q3,<fg01_2=d10,<fg89=d15
vmlal.s32 q3,d10,d15
# qhasm: h9[0,1] += fg01_2[2] signed* fg89[0];h9[2,3] += fg01_2[3] signed* fg89[1]
# asm 1: vmlal.s32 <h9=reg128#4,<fg01_2=reg128#6%top,<fg89=reg128#8%bot
# asm 2: vmlal.s32 <h9=q3,<fg01_2=d11,<fg89=d14
vmlal.s32 q3,d11,d14
# qhasm: h9[0,1] += fg23_2[0] signed* fg67[2];h9[2,3] += fg23_2[1] signed* fg67[3]
# asm 1: vmlal.s32 <h9=reg128#4,<fg23_2=reg128#7%bot,<fg67=reg128#5%top
# asm 2: vmlal.s32 <h9=q3,<fg23_2=d12,<fg67=d9
vmlal.s32 q3,d12,d9
# qhasm: h9[0,1] += fg23_2[2] signed* fg67[0];h9[2,3] += fg23_2[3] signed* fg67[1]
# asm 1: vmlal.s32 <h9=reg128#4,<fg23_2=reg128#7%top,<fg67=reg128#5%bot
# asm 2: vmlal.s32 <h9=q3,<fg23_2=d13,<fg67=d8
vmlal.s32 q3,d13,d8
# qhasm: ptr = &_0x1000000_stack
# asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
# asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
add r2,sp,#528
# qhasm: _0x1000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x1000000=reg128#5%bot->_0x1000000=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x1000000=d8->_0x1000000=d9},[<ptr=r2,: 128]
vld1.8 {d8-d9},[r2,: 128]
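# note: carry step pattern, sketched here for a 26-bit limb i (25-bit
# limbs use 2^24 and a shift by 25 instead):
#   c      = (h[i] + 2^25) signed>> 26
#   h[i+1] += c
#   h[i]   -= c << 26
# Two independent carry chains, one starting at h0 and one at h6, are
# interleaved below to hide instruction latency.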
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#6,<h0=reg128#13,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t0=q5,<h0=q12,<_0x2000000=q9
vadd.i64 q5,q12,q9
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#16,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t6=q6,<h6=q15,<_0x2000000=q9
vadd.i64 q6,q15,q9
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#6,<t0=reg128#6,#26
# asm 2: vshr.s64 >c0=q5,<t0=q5,#26
vshr.s64 q5,q5,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#8,<h1=reg128#11,<c0=reg128#6
# asm 2: vadd.i64 >h1=q7,<h1=q10,<c0=q5
vadd.i64 q7,q10,q5
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#6,<c0=reg128#6,#26
# asm 2: vshl.i64 >t0=q5,<c0=q5,#26
vshl.i64 q5,q5,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#9,<h1=reg128#8,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t1=q8,<h1=q7,<_0x1000000=q4
vadd.i64 q8,q7,q4
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#3,<h7=reg128#3,<c6=reg128#7
# asm 2: vadd.i64 >h7=q2,<h7=q2,<c6=q6
vadd.i64 q2,q2,q6
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#7,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q6,<c6=q6,#26
vshl.i64 q6,q6,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#11,<h7=reg128#3,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t7=q10,<h7=q2,<_0x1000000=q4
vadd.i64 q10,q2,q4
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#6,<h0=reg128#13,<t0=reg128#6
# asm 2: vsub.i64 >h0=q5,<h0=q12,<t0=q5
vsub.i64 q5,q12,q5
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#9,<t1=reg128#9,#25
# asm 2: vshr.s64 >c1=q8,<t1=q8,#25
vshr.s64 q8,q8,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#7,<h6=reg128#16,<t6=reg128#7
# asm 2: vsub.i64 >h6=q6,<h6=q15,<t6=q6
vsub.i64 q6,q15,q6
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#11,<t7=reg128#11,#25
# asm 2: vshr.s64 >c7=q10,<t7=q10,#25
vshr.s64 q10,q10,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#13,<h2=reg128#14,<c1=reg128#9
# asm 2: vadd.i64 >h2=q12,<h2=q13,<c1=q8
vadd.i64 q12,q13,q8
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#9,<c1=reg128#9,#25
# asm 2: vshl.i64 >t1=q8,<c1=q8,#25
vshl.i64 q8,q8,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#14,<h2=reg128#13,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t2=q13,<h2=q12,<_0x2000000=q9
vadd.i64 q13,q12,q9
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#1,<h8=reg128#1,<c7=reg128#11
# asm 2: vadd.i64 >h8=q0,<h8=q0,<c7=q10
vadd.i64 q0,q0,q10
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#8,<h1=reg128#8,<t1=reg128#9
# asm 2: vsub.i64 >h1=q7,<h1=q7,<t1=q8
vsub.i64 q7,q7,q8
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#9,<t2=reg128#14,#26
# asm 2: vshr.s64 >c2=q8,<t2=q13,#26
vshr.s64 q8,q13,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#11,<c7=reg128#11,#25
# asm 2: vshl.i64 >t7=q10,<c7=q10,#25
vshl.i64 q10,q10,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#14,<h8=reg128#1,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t8=q13,<h8=q0,<_0x2000000=q9
vadd.i64 q13,q0,q9
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#2,<h3=reg128#2,<c2=reg128#9
# asm 2: vadd.i64 >h3=q1,<h3=q1,<c2=q8
vadd.i64 q1,q1,q8
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#9,<c2=reg128#9,#26
# asm 2: vshl.i64 >t2=q8,<c2=q8,#26
vshl.i64 q8,q8,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#2,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t3=q15,<h3=q1,<_0x1000000=q4
vadd.i64 q15,q1,q4
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#3,<h7=reg128#3,<t7=reg128#11
# asm 2: vsub.i64 >h7=q2,<h7=q2,<t7=q10
vsub.i64 q2,q2,q10
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#11,<t8=reg128#14,#26
# asm 2: vshr.s64 >c8=q10,<t8=q13,#26
vshr.s64 q10,q13,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#9,<h2=reg128#13,<t2=reg128#9
# asm 2: vsub.i64 >h2=q8,<h2=q12,<t2=q8
vsub.i64 q8,q12,q8
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#13,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q12,<t3=q15,#25
vshr.s64 q12,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#4,<h9=reg128#4,<c8=reg128#11
# asm 2: vadd.i64 >h9=q3,<h9=q3,<c8=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#11,<c8=reg128#11,#26
# asm 2: vshl.i64 >t8=q10,<c8=q10,#26
vshl.i64 q10,q10,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#14,<h9=reg128#4,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t9=q13,<h9=q3,<_0x1000000=q4
vadd.i64 q13,q3,q4
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#15,<h4=reg128#15,<c3=reg128#13
# asm 2: vadd.i64 >h4=q14,<h4=q14,<c3=q12
vadd.i64 q14,q14,q12
# qhasm: posh = playground1_ptr + 288
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#288
# asm 2: add >posh=r2,<playground1_ptr=r3,#288
add r2,r3,#288
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#13,<c3=reg128#13,#25
# asm 2: vshl.i64 >t3=q12,<c3=q12,#25
vshl.i64 q12,q12,#25
# qhasm: posH = playground1_ptr + 336
# asm 1: add >posH=int32#5,<playground1_ptr=int32#4,#336
# asm 2: add >posH=r4,<playground1_ptr=r3,#336
add r4,r3,#336
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#15,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t4=q15,<h4=q14,<_0x2000000=q9
vadd.i64 q15,q14,q9
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#1,<h8=reg128#1,<t8=reg128#11
# asm 2: vsub.i64 >h8=q0,<h8=q0,<t8=q10
vsub.i64 q0,q0,q10
# qhasm: posH+=8
# asm 1: add >posH=int32#5,<posH=int32#5,#8
# asm 2: add >posH=r4,<posH=r4,#8
add r4,r4,#8
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#11,<t9=reg128#14,#25
# asm 2: vshr.s64 >c9=q10,<t9=q13,#25
vshr.s64 q10,q13,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#2,<h3=reg128#2,<t3=reg128#13
# asm 2: vsub.i64 >h3=q1,<h3=q1,<t3=q12
vsub.i64 q1,q1,q12
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#13,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q12,<t4=q15,#26
vshr.s64 q12,q15,#26
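# note: the carry out of limb 9 wraps around to limb 0 with a factor
# of 19, since 2^255 = 19 mod 2^255-19.  19*c9 is assembled without a
# multiplier as 2*c9 + 16*c9 + c9: the s = c9 + c9, s = c9 << 4 and
# h0 += c9 steps below.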
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#14,<c9=reg128#11,<c9=reg128#11
# asm 2: vadd.i64 >s=q13,<c9=q10,<c9=q10
vadd.i64 q13,q10,q10
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#12,<h5=reg128#12,<c4=reg128#13
# asm 2: vadd.i64 >h5=q11,<h5=q11,<c4=q12
vadd.i64 q11,q11,q12
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#9%bot,<h3=reg128#2%bot
# asm 2: vtrn.32 <h2=d16,<h3=d2
vtrn.32 d16,d2
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#13,<c4=reg128#13,#26
# asm 2: vshl.i64 >t4=q12,<c4=q12,#26
vshl.i64 q12,q12,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#9%top,<h3=reg128#2%top
# asm 2: vtrn.32 <h2=d17,<h3=d3
vtrn.32 d17,d3
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#2,<h5=reg128#12,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t5=q1,<h5=q11,<_0x1000000=q4
vadd.i64 q1,q11,q4
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#6,<s=reg128#14
# asm 2: vadd.i64 >h0=q4,<h0=q5,<s=q13
vadd.i64 q4,q5,q13
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#9%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d16,[<posh=r2,: 64]!
vst1.8 d16,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#6,<c9=reg128#11,#4
# asm 2: vshl.i64 >s=q5,<c9=q10,#4
vshl.i64 q5,q10,#4
# qhasm: mem64[posH] aligned= h2[1];posH+=8
# asm 1: vst1.8 <h2=reg128#9%top,[<posH=int32#5,: 64]!
# asm 2: vst1.8 <h2=d17,[<posH=r4,: 64]!
vst1.8 d17,[r4,: 64]!
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#9,<h4=reg128#15,<t4=reg128#13
# asm 2: vsub.i64 >h4=q8,<h4=q14,<t4=q12
vsub.i64 q8,q14,q12
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#2,<t5=reg128#2,#25
# asm 2: vshr.s64 >c5=q1,<t5=q1,#25
vshr.s64 q1,q1,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<s=reg128#6
# asm 2: vadd.i64 >h0=q4,<h0=q4,<s=q5
vadd.i64 q4,q4,q5
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#6,<h6=reg128#7,<c5=reg128#2
# asm 2: vadd.i64 >h6=q5,<h6=q6,<c5=q1
vadd.i64 q5,q6,q1
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#2,<c5=reg128#2,#25
# asm 2: vshl.i64 >t5=q1,<c5=q1,#25
vshl.i64 q1,q1,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#6,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t6=q6,<h6=q5,<_0x2000000=q9
vadd.i64 q6,q5,q9
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<c9=reg128#11
# asm 2: vadd.i64 >h0=q4,<h0=q4,<c9=q10
vadd.i64 q4,q4,q10
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#11,<c9=reg128#11,#25
# asm 2: vshl.i64 >t9=q10,<c9=q10,#25
vshl.i64 q10,q10,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#10,<h0=reg128#5,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t0=q9,<h0=q4,<_0x2000000=q9
vadd.i64 q9,q4,q9
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#2,<h5=reg128#12,<t5=reg128#2
# asm 2: vsub.i64 >h5=q1,<h5=q11,<t5=q1
vsub.i64 q1,q11,q1
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#4,<h9=reg128#4,<t9=reg128#11
# asm 2: vsub.i64 >h9=q3,<h9=q3,<t9=q10
vsub.i64 q3,q3,q10
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#9%bot,<h5=reg128#2%bot
# asm 2: vtrn.32 <h4=d16,<h5=d2
vtrn.32 d16,d2
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#10,<t0=reg128#10,#26
# asm 2: vshr.s64 >c0=q9,<t0=q9,#26
vshr.s64 q9,q9,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#9%top,<h5=reg128#2%top
# asm 2: vtrn.32 <h4=d17,<h5=d3
vtrn.32 d17,d3
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#2,<h7=reg128#3,<c6=reg128#7
# asm 2: vadd.i64 >h7=q1,<h7=q2,<c6=q6
vadd.i64 q1,q2,q6
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#9%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d16,[<posh=r2,: 64]
vst1.8 d16,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#3,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q2,<c6=q6,#26
vshl.i64 q2,q6,#26
# qhasm: mem64[posH] aligned= h4[1]
# asm 1: vst1.8 <h4=reg128#9%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h4=d17,[<posH=r4,: 64]
vst1.8 d17,[r4,: 64]
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#7,<h1=reg128#8,<c0=reg128#10
# asm 2: vadd.i64 >h1=q6,<h1=q7,<c0=q9
vadd.i64 q6,q7,q9
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#1%bot,<h9=reg128#4%bot
# asm 2: vtrn.32 <h8=d0,<h9=d6
vtrn.32 d0,d6
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#8,<c0=reg128#10,#26
# asm 2: vshl.i64 >t0=q7,<c0=q9,#26
vshl.i64 q7,q9,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#1%top,<h9=reg128#4%top
# asm 2: vtrn.32 <h8=d1,<h9=d7
vtrn.32 d1,d7
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#3,<h6=reg128#6,<t6=reg128#3
# asm 2: vsub.i64 >h6=q2,<h6=q5,<t6=q2
vsub.i64 q2,q5,q2
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#4,<h0=reg128#5,<t0=reg128#8
# asm 2: vsub.i64 >h0=q3,<h0=q4,<t0=q7
vsub.i64 q3,q4,q7
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: posH+=16
# asm 1: add >posH=int32#5,<posH=int32#5,#16
# asm 2: add >posH=r4,<posH=r4,#16
add r4,r4,#16
# qhasm: mem64[posH] aligned= h8[1]
# asm 1: vst1.8 <h8=reg128#1%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h8=d1,[<posH=r4,: 64]
vst1.8 d1,[r4,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#3%bot,<h7=reg128#2%bot
# asm 2: vtrn.32 <h6=d4,<h7=d2
vtrn.32 d4,d2
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#3%top,<h7=reg128#2%top
# asm 2: vtrn.32 <h6=d5,<h7=d3
vtrn.32 d5,d3
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: posH-=8
# asm 1: sub >posH=int32#5,<posH=int32#5,#8
# asm 2: sub >posH=r4,<posH=r4,#8
sub r4,r4,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#4%bot,<h1=reg128#7%bot
# asm 2: vtrn.32 <h0=d6,<h1=d12
vtrn.32 d6,d12
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#4%top,<h1=reg128#7%top
# asm 2: vtrn.32 <h0=d7,<h1=d13
vtrn.32 d7,d13
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: mem64[posH] aligned= h6[1]
# asm 1: vst1.8 <h6=reg128#3%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h6=d5,[<posH=r4,: 64]
vst1.8 d5,[r4,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: posH-=24
# asm 1: sub >posH=int32#5,<posH=int32#5,#24
# asm 2: sub >posH=r4,<posH=r4,#24
sub r4,r4,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#4%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d6,[<posh=r2,: 64]
vst1.8 d6,[r2,: 64]
# qhasm: mem64[posH] aligned= h0[1]
# asm 1: vst1.8 <h0=reg128#4%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h0=d7,[<posH=r4,: 64]
vst1.8 d7,[r4,: 64]
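# note: the vtrn.32/vst1.8 sequence above narrows each reduced 64-bit
# limb pair to packed 32-bit limbs and separates the two results
# carried in parallel lanes: one set of ten limbs is stored at
# playground1_ptr+288, the other at playground1_ptr+336.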
# qhasm: posf = playground1_ptr + 240
# asm 1: add >posf=int32#3,<playground1_ptr=int32#4,#240
# asm 2: add >posf=r2,<playground1_ptr=r3,#240
add r2,r3,#240
# qhasm: posg = playground1_ptr + 96
# asm 1: add >posg=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >posg=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: g02 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g02=reg128#1%bot->g02=reg128#1%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g02=d0->g02=d1},[<posg=r4,: 128]!
vld1.8 {d0-d1},[r4,: 128]!
# qhasm: g46 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g46=reg128#2%bot->g46=reg128#2%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g46=d2->g46=d3},[<posg=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: new g89
# qhasm: g89 aligned= mem64[posg] g89[1]
# asm 1: vld1.8 {<g89=reg128#3%bot},[<posg=int32#5,: 64]
# asm 2: vld1.8 {<g89=d4},[<posg=r4,: 64]
vld1.8 {d4},[r4,: 64]
# qhasm: posG = playground1_ptr + 144
# asm 1: add >posG=int32#5,<playground1_ptr=int32#4,#144
# asm 2: add >posG=r4,<playground1_ptr=r3,#144
add r4,r3,#144
# qhasm: g13 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g13=reg128#4%bot->g13=reg128#4%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g13=d6->g13=d7},[<posG=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: g02 g13 = g02[0]g13[0] g02[2]g13[2] g02[1]g13[1] g02[3]g13[3]
# asm 1: vtrn.32 <g02=reg128#1,<g13=reg128#4
# asm 2: vtrn.32 <g02=q0,<g13=q3
vtrn.32 q0,q3
# qhasm: g57 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g57=reg128#5%bot->g57=reg128#5%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g57=d8->g57=d9},[<posG=r4,: 128]!
vld1.8 {d8-d9},[r4,: 128]!
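# note: the next multiplication starts here.  g is loaded and vtrn'd
# into an even/odd split (g02 holds limbs 0 and 2, g13 limbs 1 and 3,
# and so on), and 19*g_i is precomputed for the products that wrap
# past limb 9: each g.._19 value is built as (g << 4) + g + g + g =
# 19*g, with the three adds spread over the following instructions.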
# qhasm: 4x mix = g02 << 4
# asm 1: vshl.i32 >mix=reg128#6,<g02=reg128#1,#4
# asm 2: vshl.i32 >mix=q5,<g02=q0,#4
vshl.i32 q5,q0,#4
# qhasm: g46 g57 = g46[0]g57[0] g46[2]g57[2] g46[1]g57[1] g46[3]g57[3]
# asm 1: vtrn.32 <g46=reg128#2,<g57=reg128#5
# asm 2: vtrn.32 <g46=q1,<g57=q4
vtrn.32 q1,q4
# qhasm: 4x g13_19 = g13 << 4
# asm 1: vshl.i32 >g13_19=reg128#7,<g13=reg128#4,#4
# asm 2: vshl.i32 >g13_19=q6,<g13=q3,#4
vshl.i32 q6,q3,#4
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: 4x g46_19 = g46 << 4
# asm 1: vshl.i32 >g46_19=reg128#8,<g46=reg128#2,#4
# asm 2: vshl.i32 >g46_19=q7,<g46=q1,#4
vshl.i32 q7,q1,#4
# qhasm: g89 aligned= g89[0] mem64[posG]
# asm 1: vld1.8 {<g89=reg128#3%top},[<posG=int32#5,: 64]
# asm 2: vld1.8 {<g89=d5},[<posG=r4,: 64]
vld1.8 {d5},[r4,: 64]
# qhasm: 4x g57_19 = g57 << 4
# asm 1: vshl.i32 >g57_19=reg128#9,<g57=reg128#5,#4
# asm 2: vshl.i32 >g57_19=q8,<g57=q4,#4
vshl.i32 q8,q4,#4
# qhasm: g89 = g89[0] g89[2] g89[1] g89[3]
# asm 1: vtrn.32 <g89=reg128#3%bot,<g89=reg128#3%top
# asm 2: vtrn.32 <g89=d4,<g89=d5
vtrn.32 d4,d5
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f02 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f02=reg128#10%bot->f02=reg128#10%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f02=d18->f02=d19},[<posf=r2,: 128]!
vld1.8 {d18-d19},[r2,: 128]!
# qhasm: 4x g89_19 = g89 << 4
# asm 1: vshl.i32 >g89_19=reg128#11,<g89=reg128#3,#4
# asm 2: vshl.i32 >g89_19=q10,<g89=q2,#4
vshl.i32 q10,q2,#4
# qhasm: f46 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f46=reg128#12%bot->f46=reg128#12%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f46=d22->f46=d23},[<posf=r2,: 128]!
vld1.8 {d22-d23},[r2,: 128]!
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: new f89
# qhasm: f89 aligned= mem64[posf] f89[1]
# asm 1: vld1.8 {<f89=reg128#13%bot},[<posf=int32#3,: 64]
# asm 2: vld1.8 {<f89=d24},[<posf=r2,: 64]
vld1.8 {d24},[r2,: 64]
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: posF = playground1_ptr + 192
# asm 1: add >posF=int32#3,<playground1_ptr=int32#4,#192
# asm 2: add >posF=r2,<playground1_ptr=r3,#192
add r2,r3,#192
# qhasm: f13 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f13=reg128#14%bot->f13=reg128#14%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f13=d26->f13=d27},[<posF=r2,: 128]!
vld1.8 {d26-d27},[r2,: 128]!
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: f57 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f57=reg128#15%bot->f57=reg128#15%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f57=d28->f57=d29},[<posF=r2,: 128]!
vld1.8 {d28-d29},[r2,: 128]!
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f89 aligned= f89[0] mem64[posF]
# asm 1: vld1.8 {<f89=reg128#13%top},[<posF=int32#3,: 64]
# asm 2: vld1.8 {<f89=d25},[<posF=r2,: 64]
vld1.8 {d25},[r2,: 64]
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f02 f13 = f02[0]f13[0] f02[2]f13[2] f02[1]f13[1] f02[3]f13[3]
# asm 1: vtrn.32 <f02=reg128#10,<f13=reg128#14
# asm 2: vtrn.32 <f02=q9,<f13=q13
vtrn.32 q9,q13
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: f46 f57 = f46[0]f57[0] f46[2]f57[2] f46[1]f57[1] f46[3]f57[3]
# asm 1: vtrn.32 <f46=reg128#12,<f57=reg128#15
# asm 2: vtrn.32 <f46=q11,<f57=q14
vtrn.32 q11,q14
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: new g13_19_stack
# qhasm: ptr = &g13_19_stack
# asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
# asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
add r2,sp,#560
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f89 = f89[0] f89[2] f89[1] f89[3]
# asm 1: vtrn.32 <f89=reg128#13%bot,<f89=reg128#13%top
# asm 2: vtrn.32 <f89=d24,<f89=d25
vtrn.32 d24,d25
# qhasm: mem128[ptr] aligned= g13_19
# asm 1: vst1.8 {<g13_19=reg128#7%bot-<g13_19=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g13_19=d12-<g13_19=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
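# note: the odd-index f limbs are doubled next (f13_2, f57_2, f89_2):
# in this radix-2^25.5 representation a product of two odd-index
# limbs carries an extra factor of 2.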
# qhasm: 4x f13_2 = f13 << 1
# asm 1: vshl.i32 >f13_2=reg128#7,<f13=reg128#14,#1
# asm 2: vshl.i32 >f13_2=q6,<f13=q13,#1
vshl.i32 q6,q13,#1
# qhasm: new g89_19_stack
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= g89_19
# asm 1: vst1.8 {<g89_19=reg128#11%bot-<g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g89_19=d20-<g89_19=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: 4x f57_2 = f57 << 1
# asm 1: vshl.i32 >f57_2=reg128#11,<f57=reg128#15,#1
# asm 2: vshl.i32 >f57_2=q10,<f57=q14,#1
vshl.i32 q10,q14,#1
# qhasm: new f13_2_stack
# qhasm: ptr = &f13_2_stack
# asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
# asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
add r2,sp,#592
# qhasm: mem128[ptr] aligned= f13_2
# asm 1: vst1.8 {<f13_2=reg128#7%bot-<f13_2=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f13_2=d12-<f13_2=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
# qhasm: 4x f89_2 = f89 << 1
# asm 1: vshl.i32 >f89_2=reg128#16,<f89=reg128#13,#1
# asm 2: vshl.i32 >f89_2=q15,<f89=q12,#1
vshl.i32 q15,q12,#1
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: mix = f89_2[2,3] mix[2,3]
# asm 1: vext.32 <mix=reg128#6%bot,<f89_2=reg128#16%top,<f89_2=reg128#16%bot,#0
# asm 2: vext.32 <mix=d10,<f89_2=d31,<f89_2=d30,#0
vext.32 d10,d31,d30,#0
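# note: mix held 19*g02; the vext.32 above overwrites its low half
# with the high half of f89_2, so a single register now carries both
# 2*f9 and 19*g2 for the multiply-accumulates below.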
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: new g57_19_stack
# qhasm: ptr = &g57_19_stack
# asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
# asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
add r2,sp,#608
# qhasm: mem128[ptr] aligned= g57_19
# asm 1: vst1.8 {<g57_19=reg128#9%bot-<g57_19=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g57_19=d16-<g57_19=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
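# note: the ten 64-bit accumulators h0..h9 plus the operands exceed
# the 16 NEON q registers, so the 19*g and 2*f precomputations (and,
# later, finished h values) are parked on the stack and reloaded in
# batches.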
# qhasm: h9[0,1] = f02[0] signed* g89[2];h9[2,3] = f02[1] signed* g89[3]
# asm 1: vmull.s32 >h9=reg128#9,<f02=reg128#10%bot,<g89=reg128#3%top
# asm 2: vmull.s32 >h9=q8,<f02=d18,<g89=d5
vmull.s32 q8,d18,d5
# qhasm: h9[0,1] += f13[0] signed* g89[0];h9[2,3] += f13[1] signed* g89[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%bot,<g89=reg128#3%bot
# asm 2: vmlal.s32 <h9=q8,<f13=d26,<g89=d4
vmlal.s32 q8,d26,d4
# qhasm: h9[0,1] += f02[2] signed* g57[2];h9[2,3] += f02[3] signed* g57[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f02=reg128#10%top,<g57=reg128#5%top
# asm 2: vmlal.s32 <h9=q8,<f02=d19,<g57=d9
vmlal.s32 q8,d19,d9
# qhasm: h9[0,1] += f13[2] signed* g46[2];h9[2,3] += f13[3] signed* g46[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h9=q8,<f13=d27,<g46=d3
vmlal.s32 q8,d27,d3
# qhasm: h9[0,1] += f46[0] signed* g57[0];h9[2,3] += f46[1] signed* g57[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h9=q8,<f46=d22,<g57=d8
vmlal.s32 q8,d22,d8
# qhasm: h9[0,1] += f57[0] signed* g46[0];h9[2,3] += f57[1] signed* g46[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h9=q8,<f57=d28,<g46=d2
vmlal.s32 q8,d28,d2
# qhasm: h9[0,1] += f46[2] signed* g13[2];h9[2,3] += f46[3] signed* g13[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h9=q8,<f46=d23,<g13=d7
vmlal.s32 q8,d23,d7
# qhasm: h9[0,1] += f57[2] signed* g02[2];h9[2,3] += f57[3] signed* g02[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h9=q8,<f57=d29,<g02=d1
vmlal.s32 q8,d29,d1
# qhasm: h9[0,1] += f89[0] signed* g13[0];h9[2,3] += f89[1] signed* g13[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d24,<g13=d6
vmlal.s32 q8,d24,d6
# qhasm: h9[0,1] += f89[2] signed* g02[0];h9[2,3] += f89[3] signed* g02[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d25,<g02=d0
vmlal.s32 q8,d25,d0
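# note: h9 above is the one column needing no correction factors: its
# ten products f_i*g_j all have i+j = 9, so nothing wraps past limb 9
# and exactly one factor in each pair has an odd index.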
# qhasm: new g46_19_stack
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: mem128[ptr] aligned= g46_19
# asm 1: vst1.8 {<g46_19=reg128#8%bot-<g46_19=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g46_19=d14-<g46_19=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h8[0,1] = f02[0] signed* g89[0];h8[2,3] = f02[1] signed* g89[1]
# asm 1: vmull.s32 >h8=reg128#3,<f02=reg128#10%bot,<g89=reg128#3%bot
# asm 2: vmull.s32 >h8=q2,<f02=d18,<g89=d4
vmull.s32 q2,d18,d4
# qhasm: h8[0,1] += f13_2[0] signed* g57[2];h8[2,3] += f13_2[1] signed* g57[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%bot,<g57=reg128#5%top
# asm 2: vmlal.s32 <h8=q2,<f13_2=d12,<g57=d9
vmlal.s32 q2,d12,d9
# qhasm: h8[0,1] += f13_2[2] signed* g57[0];h8[2,3] += f13_2[3] signed* g57[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h8=q2,<f13_2=d13,<g57=d8
vmlal.s32 q2,d13,d8
# qhasm: h8[0,1] += f02[2] signed* g46[2];h8[2,3] += f02[3] signed* g46[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f02=reg128#10%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h8=q2,<f02=d19,<g46=d3
vmlal.s32 q2,d19,d3
# qhasm: h8[0,1] += f46[0] signed* g46[0];h8[2,3] += f46[1] signed* g46[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h8=q2,<f46=d22,<g46=d2
vmlal.s32 q2,d22,d2
# qhasm: h8[0,1] += f46[2] signed* g02[2];h8[2,3] += f46[3] signed* g02[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h8=q2,<f46=d23,<g02=d1
vmlal.s32 q2,d23,d1
# qhasm: h8[0,1] += f89[0] signed* g02[0];h8[2,3] += f89[1] signed* g02[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f89=reg128#13%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h8=q2,<f89=d24,<g02=d0
vmlal.s32 q2,d24,d0
# qhasm: new f57_2_stack
# qhasm: ptr = &f57_2_stack
# asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
# asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
add r2,sp,#640
# qhasm: mem128[ptr] aligned= f57_2
# asm 1: vst1.8 {<f57_2=reg128#11%bot-<f57_2=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f57_2=d20-<f57_2=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] = f02[0] signed* g57[2];h7[2,3] = f02[1] signed* g57[3]
# asm 1: vmull.s32 >h7=reg128#8,<f02=reg128#10%bot,<g57=reg128#5%top
# asm 2: vmull.s32 >h7=q7,<f02=d18,<g57=d9
vmull.s32 q7,d18,d9
# qhasm: h7[0,1] += f13[0] signed* g46[2];h7[2,3] += f13[1] signed* g46[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%bot,<g46=reg128#2%top
# asm 2: vmlal.s32 <h7=q7,<f13=d26,<g46=d3
vmlal.s32 q7,d26,d3
# qhasm: h7[0,1] += f02[2] signed* g57[0];h7[2,3] += f02[3] signed* g57[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f02=reg128#10%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h7=q7,<f02=d19,<g57=d8
vmlal.s32 q7,d19,d8
# qhasm: h7[0,1] += f13[2] signed* g46[0];h7[2,3] += f13[3] signed* g46[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h7=q7,<f13=d27,<g46=d2
vmlal.s32 q7,d27,d2
# qhasm: h7[0,1] += f46[0] signed* g13[2];h7[2,3] += f46[1] signed* g13[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h7=q7,<f46=d22,<g13=d7
vmlal.s32 q7,d22,d7
# qhasm: h7[0,1] += f57[0] signed* g02[2];h7[2,3] += f57[1] signed* g02[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h7=q7,<f57=d28,<g02=d1
vmlal.s32 q7,d28,d1
# qhasm: h7[0,1] += f46[2] signed* g13[0];h7[2,3] += f46[3] signed* g13[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h7=q7,<f46=d23,<g13=d6
vmlal.s32 q7,d23,d6
# qhasm: h7[0,1] += f57[2] signed* g02[0];h7[2,3] += f57[3] signed* g02[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h7=q7,<f57=d29,<g02=d0
vmlal.s32 q7,d29,d0
# qhasm: new mix_stack
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mem128[ptr] aligned= mix
# asm 1: vst1.8 {<mix=reg128#6%bot-<mix=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<mix=d10-<mix=d11},[<ptr=r2,: 128]
vst1.8 {d10-d11},[r2,: 128]
# qhasm: h6[0,1] = f02[0] signed* g46[2];h6[2,3] = f02[1] signed* g46[3]
# asm 1: vmull.s32 >h6=reg128#6,<f02=reg128#10%bot,<g46=reg128#2%top
# asm 2: vmull.s32 >h6=q5,<f02=d18,<g46=d3
vmull.s32 q5,d18,d3
# qhasm: h6[0,1] += f02[2] signed* g46[0];h6[2,3] += f02[3] signed* g46[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f02=reg128#10%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h6=q5,<f02=d19,<g46=d2
vmlal.s32 q5,d19,d2
# qhasm: h6[0,1] += f46[0] signed* g02[2];h6[2,3] += f46[1] signed* g02[3]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h6=q5,<f46=d22,<g02=d1
vmlal.s32 q5,d22,d1
# qhasm: h6[0,1] += f46[2] signed* g02[0];h6[2,3] += f46[3] signed* g02[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h6=q5,<f46=d23,<g02=d0
vmlal.s32 q5,d23,d0
# qhasm: h6[0,1] += f13_2[0] signed* g57[0];h6[2,3] += f13_2[1] signed* g57[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#7%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h6=q5,<f13_2=d12,<g57=d8
vmlal.s32 q5,d12,d8
# qhasm: new h9_stack
# qhasm: ptr = &h9_stack
# asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
# asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
add r2,sp,#672
# qhasm: mem128[ptr] aligned= h9
# asm 1: vst1.8 {<h9=reg128#9%bot-<h9=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h9=d16-<h9=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
# qhasm: h5[0,1] = f02[0] signed* g57[0];h5[2,3] = f02[1] signed* g57[1]
# asm 1: vmull.s32 >h5=reg128#5,<f02=reg128#10%bot,<g57=reg128#5%bot
# asm 2: vmull.s32 >h5=q4,<f02=d18,<g57=d8
vmull.s32 q4,d18,d8
# qhasm: h5[0,1] += f13[0] signed* g46[0];h5[2,3] += f13[1] signed* g46[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h5=q4,<f13=d26,<g46=d2
vmlal.s32 q4,d26,d2
# qhasm: h5[0,1] += f02[2] signed* g13[2];h5[2,3] += f02[3] signed* g13[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f02=reg128#10%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h5=q4,<f02=d19,<g13=d7
vmlal.s32 q4,d19,d7
# qhasm: h5[0,1] += f13[2] signed* g02[2];h5[2,3] += f13[3] signed* g02[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h5=q4,<f13=d27,<g02=d1
vmlal.s32 q4,d27,d1
# qhasm: h5[0,1] += f46[0] signed* g13[0];h5[2,3] += f46[1] signed* g13[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h5=q4,<f46=d22,<g13=d6
vmlal.s32 q4,d22,d6
# qhasm: h5[0,1] += f57[0] signed* g02[0];h5[2,3] += f57[1] signed* g02[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d28,<g02=d0
vmlal.s32 q4,d28,d0
# qhasm: h3[0,1] = f02[0] signed* g13[2];h3[2,3] = f02[1] signed* g13[3]
# asm 1: vmull.s32 >h3=reg128#9,<f02=reg128#10%bot,<g13=reg128#4%top
# asm 2: vmull.s32 >h3=q8,<f02=d18,<g13=d7
vmull.s32 q8,d18,d7
# qhasm: h3[0,1] += f13[0] signed* g02[2];h3[2,3] += f13[1] signed* g02[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h3=q8,<f13=d26,<g02=d1
vmlal.s32 q8,d26,d1
# qhasm: h3[0,1] += f02[2] signed* g13[0];h3[2,3] += f02[3] signed* g13[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f02=reg128#10%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h3=q8,<f02=d19,<g13=d6
vmlal.s32 q8,d19,d6
# qhasm: h3[0,1] += f13[2] signed* g02[0];h3[2,3] += f13[3] signed* g02[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h3=q8,<f13=d27,<g02=d0
vmlal.s32 q8,d27,d0
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: g89_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g89_19=reg128#11%bot->g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g89_19=d20->g89_19=d21},[<ptr=r2,: 128]
vld1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] += f89[0] signed* g89_19[2];h7[2,3] += f89[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h7=q7,<f89=d24,<g89_19=d21
vmlal.s32 q7,d24,d21
# qhasm: h7[0,1] += f89[2] signed* g89_19[0];h7[2,3] += f89[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h7=q7,<f89=d25,<g89_19=d20
vmlal.s32 q7,d25,d20
# qhasm: h5[0,1] += f46[2] signed* g89_19[2];h5[2,3] += f46[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h5=q4,<f46=d23,<g89_19=d21
vmlal.s32 q4,d23,d21
# qhasm: h5[0,1] += f57[2] signed* g89_19[0];h5[2,3] += f57[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d29,<g89_19=d20
vmlal.s32 q4,d29,d20
# qhasm: h3[0,1] += f46[0] signed* g89_19[2];h3[2,3] += f46[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h3=q8,<f46=d22,<g89_19=d21
vmlal.s32 q8,d22,d21
# qhasm: h3[0,1] += f57[0] signed* g89_19[0];h3[2,3] += f57[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h3=q8,<f57=d28,<g89_19=d20
vmlal.s32 q8,d28,d20
# qhasm: h6[0,1] += f89[0] signed* g89_19[0];h6[2,3] += f89[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f89=reg128#13%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h6=q5,<f89=d24,<g89_19=d20
vmlal.s32 q5,d24,d20
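# note: each spilled 19*g block is reloaded once and folded into every
# h that still needs it (g89_19 above feeds h7, h5, h3 and h6) before
# its stack slot is reused; h7_stack below reuses the g89_19 slot at
# [sp,#576].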
# qhasm: new h7_stack
# qhasm: ptr = &h7_stack
# asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
# asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= h7
# asm 1: vst1.8 {<h7=reg128#8%bot-<h7=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h7=d14-<h7=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h1[0,1] = f02[0] signed* g13[0];h1[2,3] = f02[1] signed* g13[1]
# asm 1: vmull.s32 >h1=reg128#8,<f02=reg128#10%bot,<g13=reg128#4%bot
# asm 2: vmull.s32 >h1=q7,<f02=d18,<g13=d6
vmull.s32 q7,d18,d6
# qhasm: h1[0,1] += f13[0] signed* g02[0];h1[2,3] += f13[1] signed* g02[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d26,<g02=d0
vmlal.s32 q7,d26,d0
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mix aligned= mem128[ptr]
# asm 1: vld1.8 {>mix=reg128#16%bot->mix=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>mix=d30->mix=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: h8[0,1] += mix[0] signed* g89_19[2];h8[2,3] += mix[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h8=reg128#3,<mix=reg128#16%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h8=q2,<mix=d30,<g89_19=d21
vmlal.s32 q2,d30,d21
# qhasm: h1[0,1] += f02[2] signed* g89_19[2];h1[2,3] += f02[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f02=reg128#10%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h1=q7,<f02=d19,<g89_19=d21
vmlal.s32 q7,d19,d21
# qhasm: h1[0,1] += f13[2] signed* g89_19[0];h1[2,3] += f13[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d27,<g89_19=d20
vmlal.s32 q7,d27,d20
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: g46_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g46_19=reg128#14%bot->g46_19=reg128#14%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g46_19=d26->g46_19=d27},[<ptr=r2,: 128]
vld1.8 {d26-d27},[r2,: 128]
# qhasm: h5[0,1] += f89[2] signed* g46_19[2];h5[2,3] += f89[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h5=q4,<f89=d25,<g46_19=d27
vmlal.s32 q4,d25,d27
# qhasm: h3[0,1] += f57[2] signed* g46_19[2];h3[2,3] += f57[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h3=q8,<f57=d29,<g46_19=d27
vmlal.s32 q8,d29,d27
# qhasm: h3[0,1] += f89[2] signed* g46_19[0];h3[2,3] += f89[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h3=q8,<f89=d25,<g46_19=d26
vmlal.s32 q8,d25,d26
# qhasm: h1[0,1] += f57[0] signed* g46_19[2];h1[2,3] += f57[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h1=q7,<f57=d28,<g46_19=d27
vmlal.s32 q7,d28,d27
# qhasm: h1[0,1] += f57[2] signed* g46_19[0];h1[2,3] += f57[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h1=q7,<f57=d29,<g46_19=d26
vmlal.s32 q7,d29,d26
# qhasm: ptr = &g57_19_stack
# asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
# asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
add r2,sp,#608
# qhasm: g57_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g57_19=reg128#15%bot->g57_19=reg128#15%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g57_19=d28->g57_19=d29},[<ptr=r2,: 128]
vld1.8 {d28-d29},[r2,: 128]
# qhasm: h5[0,1] += f89[0] signed* g57_19[2];h5[2,3] += f89[1] signed* g57_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%bot,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h5=q4,<f89=d24,<g57_19=d29
vmlal.s32 q4,d24,d29
# qhasm: h3[0,1] += f46[2] signed* g57_19[2];h3[2,3] += f46[3] signed* g57_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%top,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h3=q8,<f46=d23,<g57_19=d29
vmlal.s32 q8,d23,d29
# qhasm: h3[0,1] += f89[0] signed* g57_19[0];h3[2,3] += f89[1] signed* g57_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%bot,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h3=q8,<f89=d24,<g57_19=d28
vmlal.s32 q8,d24,d28
# qhasm: h1[0,1] += f46[0] signed* g57_19[2];h1[2,3] += f46[1] signed* g57_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%bot,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h1=q7,<f46=d22,<g57_19=d29
vmlal.s32 q7,d22,d29
# qhasm: h1[0,1] += f46[2] signed* g57_19[0];h1[2,3] += f46[3] signed* g57_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%top,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h1=q7,<f46=d23,<g57_19=d28
vmlal.s32 q7,d23,d28
# qhasm: new h5_stack
# qhasm: ptr = &h5_stack
# asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
# asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
add r2,sp,#608
# qhasm: mem128[ptr] aligned= h5
# asm 1: vst1.8 {<h5=reg128#5%bot-<h5=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h5=d8-<h5=d9},[<ptr=r2,: 128]
vst1.8 {d8-d9},[r2,: 128]
# qhasm: ptr = &g13_19_stack
# asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
# asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
add r2,sp,#560
# qhasm: g13_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g13_19=reg128#5%bot->g13_19=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g13_19=d8->g13_19=d9},[<ptr=r2,: 128]
vld1.8 {d8-d9},[r2,: 128]
# qhasm: h1[0,1] += f89[0] signed* g13_19[2];h1[2,3] += f89[1] signed* g13_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%bot,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h1=q7,<f89=d24,<g13_19=d9
vmlal.s32 q7,d24,d9
# qhasm: h1[0,1] += f89[2] signed* mix[2];h1[2,3] += f89[3] signed* mix[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%top,<mix=reg128#16%top
# asm 2: vmlal.s32 <h1=q7,<f89=d25,<mix=d31
vmlal.s32 q7,d25,d31
# qhasm: h4[0,1] = f02[0] signed* g46[0];h4[2,3] = f02[1] signed* g46[1]
# asm 1: vmull.s32 >h4=reg128#2,<f02=reg128#10%bot,<g46=reg128#2%bot
# asm 2: vmull.s32 >h4=q1,<f02=d18,<g46=d2
vmull.s32 q1,d18,d2
# qhasm: h4[0,1] += f02[2] signed* g02[2];h4[2,3] += f02[3] signed* g02[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f02=reg128#10%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h4=q1,<f02=d19,<g02=d1
vmlal.s32 q1,d19,d1
# qhasm: h4[0,1] += f46[0] signed* g02[0];h4[2,3] += f46[1] signed* g02[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h4=q1,<f46=d22,<g02=d0
vmlal.s32 q1,d22,d0
# qhasm: h4[0,1] += f89[0] signed* g46_19[2];h4[2,3] += f89[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f89=reg128#13%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h4=q1,<f89=d24,<g46_19=d27
vmlal.s32 q1,d24,d27
# qhasm: h4[0,1] += f46[2] signed* g89_19[0];h4[2,3] += f46[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h4=q1,<f46=d23,<g89_19=d20
vmlal.s32 q1,d23,d20
# qhasm: h4[0,1] += f13_2[0] signed* g13[2];h4[2,3] += f13_2[1] signed* g13[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h4=q1,<f13_2=d12,<g13=d7
vmlal.s32 q1,d12,d7
# qhasm: h4[0,1] += f13_2[2] signed* g13[0];h4[2,3] += f13_2[3] signed* g13[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h4=q1,<f13_2=d13,<g13=d6
vmlal.s32 q1,d13,d6
# qhasm: h2[0,1] = f02[0] signed* g02[2];h2[2,3] = f02[1] signed* g02[3]
# asm 1: vmull.s32 >h2=reg128#7,<f02=reg128#10%bot,<g02=reg128#1%top
# asm 2: vmull.s32 >h2=q6,<f02=d18,<g02=d1
vmull.s32 q6,d18,d1
# qhasm: h2[0,1] += f02[2] signed* g02[0];h2[2,3] += f02[3] signed* g02[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f02=reg128#10%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h2=q6,<f02=d19,<g02=d0
vmlal.s32 q6,d19,d0
# qhasm: h2[0,1] += f46[2] signed* g46_19[2];h2[2,3] += f46[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h2=q6,<f46=d23,<g46_19=d27
vmlal.s32 q6,d23,d27
# qhasm: h2[0,1] += f46[0] signed* g89_19[0];h2[2,3] += f46[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h2=q6,<f46=d22,<g89_19=d20
vmlal.s32 q6,d22,d20
# qhasm: h2[0,1] += f89[0] signed* g46_19[0];h2[2,3] += f89[1] signed* g46_19[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f89=reg128#13%bot,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h2=q6,<f89=d24,<g46_19=d26
vmlal.s32 q6,d24,d26
# qhasm: h0[0,1] = f02[0] signed* g02[0];h0[2,3] = f02[1] signed* g02[1]
# asm 1: vmull.s32 >h0=reg128#1,<f02=reg128#10%bot,<g02=reg128#1%bot
# asm 2: vmull.s32 >h0=q0,<f02=d18,<g02=d0
vmull.s32 q0,d18,d0
# qhasm: h0[0,1] += f46[0] signed* g46_19[2];h0[2,3] += f46[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h0=q0,<f46=d22,<g46_19=d27
vmlal.s32 q0,d22,d27
# qhasm: h0[0,1] += f46[2] signed* g46_19[0];h0[2,3] += f46[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h0=q0,<f46=d23,<g46_19=d26
vmlal.s32 q0,d23,d26
# qhasm: h0[0,1] += f89[0] signed* mix[2];h0[2,3] += f89[1] signed* mix[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f89=reg128#13%bot,<mix=reg128#16%top
# asm 2: vmlal.s32 <h0=q0,<f89=d24,<mix=d31
vmlal.s32 q0,d24,d31
# qhasm: h0[0,1] += f02[2] signed* g89_19[0];h0[2,3] += f02[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h0=reg128#1,<f02=reg128#10%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h0=q0,<f02=d19,<g89_19=d20
vmlal.s32 q0,d19,d20
  2718. # qhasm: ptr = &f57_2_stack
  2719. # asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
  2720. # asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
  2721. add r2,sp,#640
  2722. # qhasm: f57_2 aligned= mem128[ptr]
  2723. # asm 1: vld1.8 {>f57_2=reg128#10%bot->f57_2=reg128#10%top},[<ptr=int32#3,: 128]
  2724. # asm 2: vld1.8 {>f57_2=d18->f57_2=d19},[<ptr=r2,: 128]
  2725. vld1.8 {d18-d19},[r2,: 128]
  2726. # qhasm: h8[0,1] += f57_2[0] signed* g13[2];h8[2,3] += f57_2[1] signed* g13[3]
  2727. # asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%bot,<g13=reg128#4%top
  2728. # asm 2: vmlal.s32 <h8=q2,<f57_2=d18,<g13=d7
  2729. vmlal.s32 q2,d18,d7
  2730. # qhasm: h8[0,1] += f57_2[2] signed* g13[0];h8[2,3] += f57_2[3] signed* g13[1]
  2731. # asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%top,<g13=reg128#4%bot
  2732. # asm 2: vmlal.s32 <h8=q2,<f57_2=d19,<g13=d6
  2733. vmlal.s32 q2,d19,d6
  2734. # qhasm: h6[0,1] += f57_2[0] signed* g13[0];h6[2,3] += f57_2[1] signed* g13[1]
  2735. # asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%bot,<g13=reg128#4%bot
  2736. # asm 2: vmlal.s32 <h6=q5,<f57_2=d18,<g13=d6
  2737. vmlal.s32 q5,d18,d6
  2738. # qhasm: h6[0,1] += f57_2[2] signed* g89_19[2];h6[2,3] += f57_2[3] signed* g89_19[3]
  2739. # asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%top,<g89_19=reg128#11%top
  2740. # asm 2: vmlal.s32 <h6=q5,<f57_2=d19,<g89_19=d21
  2741. vmlal.s32 q5,d19,d21
  2742. # qhasm: h4[0,1] += f57_2[0] signed* g89_19[2];h4[2,3] += f57_2[1] signed* g89_19[3]
  2743. # asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%bot,<g89_19=reg128#11%top
  2744. # asm 2: vmlal.s32 <h4=q1,<f57_2=d18,<g89_19=d21
  2745. vmlal.s32 q1,d18,d21
  2746. # qhasm: h4[0,1] += f57_2[2] signed* g57_19[2];h4[2,3] += f57_2[3] signed* g57_19[3]
  2747. # asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%top,<g57_19=reg128#15%top
  2748. # asm 2: vmlal.s32 <h4=q1,<f57_2=d19,<g57_19=d29
  2749. vmlal.s32 q1,d19,d29
  2750. # qhasm: h0[0,1] += f57_2[0] signed* g57_19[0];h0[2,3] += f57_2[1] signed* g57_19[1]
  2751. # asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%bot,<g57_19=reg128#15%bot
  2752. # asm 2: vmlal.s32 <h0=q0,<f57_2=d18,<g57_19=d28
  2753. vmlal.s32 q0,d18,d28
  2754. # qhasm: h0[0,1] += f57_2[2] signed* g13_19[2];h0[2,3] += f57_2[3] signed* g13_19[3]
  2755. # asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%top,<g13_19=reg128#5%top
  2756. # asm 2: vmlal.s32 <h0=q0,<f57_2=d19,<g13_19=d9
  2757. vmlal.s32 q0,d19,d9
  2758. # qhasm: h2[0,1] += f57_2[0] signed* g57_19[2];h2[2,3] += f57_2[1] signed* g57_19[3]
  2759. # asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%bot,<g57_19=reg128#15%top
  2760. # asm 2: vmlal.s32 <h2=q6,<f57_2=d18,<g57_19=d29
  2761. vmlal.s32 q6,d18,d29
  2762. # qhasm: h2[0,1] += f57_2[2] signed* g57_19[0];h2[2,3] += f57_2[3] signed* g57_19[1]
  2763. # asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%top,<g57_19=reg128#15%bot
  2764. # asm 2: vmlal.s32 <h2=q6,<f57_2=d19,<g57_19=d28
  2765. vmlal.s32 q6,d19,d28
  2766. # qhasm: ptr = &f13_2_stack
  2767. # asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
  2768. # asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
  2769. add r2,sp,#592
  2770. # qhasm: f13_2 aligned= mem128[ptr]
  2771. # asm 1: vld1.8 {>f13_2=reg128#10%bot->f13_2=reg128#10%top},[<ptr=int32#3,: 128]
  2772. # asm 2: vld1.8 {>f13_2=d18->f13_2=d19},[<ptr=r2,: 128]
  2773. vld1.8 {d18-d19},[r2,: 128]
  2774. # qhasm: ptr = &_0x2000000_stack
  2775. # asm 1: lea >ptr=int32#3,<_0x2000000_stack=stack128#1
  2776. # asm 2: lea >ptr=r2,<_0x2000000_stack=[sp,#512]
  2777. add r2,sp,#512
  2778. # qhasm: _0x2000000 aligned= mem128[ptr]
  2779. # asm 1: vld1.8 {>_0x2000000=reg128#12%bot->_0x2000000=reg128#12%top},[<ptr=int32#3,: 128]
  2780. # asm 2: vld1.8 {>_0x2000000=d22->_0x2000000=d23},[<ptr=r2,: 128]
  2781. vld1.8 {d22-d23},[r2,: 128]
  2782. # qhasm: h6[0,1] += f13_2[2] signed* g13[2];h6[2,3] += f13_2[3] signed* g13[3]
  2783. # asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#10%top,<g13=reg128#4%top
  2784. # asm 2: vmlal.s32 <h6=q5,<f13_2=d19,<g13=d7
  2785. vmlal.s32 q5,d19,d7
  2786. # qhasm: h0[0,1] += f13_2[0] signed* g89_19[2];h0[2,3] += f13_2[1] signed* g89_19[3]
  2787. # asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%bot,<g89_19=reg128#11%top
  2788. # asm 2: vmlal.s32 <h0=q0,<f13_2=d18,<g89_19=d21
  2789. vmlal.s32 q0,d18,d21
  2790. # qhasm: h0[0,1] += f13_2[2] signed* g57_19[2];h0[2,3] += f13_2[3] signed* g57_19[3]
  2791. # asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%top,<g57_19=reg128#15%top
  2792. # asm 2: vmlal.s32 <h0=q0,<f13_2=d19,<g57_19=d29
  2793. vmlal.s32 q0,d19,d29
  2794. # qhasm: h2[0,1] += f13_2[0] signed* g13[0];h2[2,3] += f13_2[1] signed* g13[1]
  2795. # asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%bot,<g13=reg128#4%bot
  2796. # asm 2: vmlal.s32 <h2=q6,<f13_2=d18,<g13=d6
  2797. vmlal.s32 q6,d18,d6
  2798. # qhasm: ptr = &_0x1000000_stack
  2799. # asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
  2800. # asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
  2801. add r2,sp,#528
  2802. # qhasm: _0x1000000 aligned= mem128[ptr]
  2803. # asm 1: vld1.8 {>_0x1000000=reg128#4%bot->_0x1000000=reg128#4%top},[<ptr=int32#3,: 128]
  2804. # asm 2: vld1.8 {>_0x1000000=d6->_0x1000000=d7},[<ptr=r2,: 128]
  2805. vld1.8 {d6-d7},[r2,: 128]
  2806. # qhasm: h2[0,1] += f13_2[2] signed* g89_19[2];h2[2,3] += f13_2[3] signed* g89_19[3]
  2807. # asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%top,<g89_19=reg128#11%top
  2808. # asm 2: vmlal.s32 <h2=q6,<f13_2=d19,<g89_19=d21
  2809. vmlal.s32 q6,d19,d21
  2810. # qhasm: ptr = &h7_stack
  2811. # asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
  2812. # asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
  2813. add r2,sp,#576
  2814. # qhasm: h7 aligned= mem128[ptr]
  2815. # asm 1: vld1.8 {>h7=reg128#10%bot->h7=reg128#10%top},[<ptr=int32#3,: 128]
  2816. # asm 2: vld1.8 {>h7=d18->h7=d19},[<ptr=r2,: 128]
  2817. vld1.8 {d18-d19},[r2,: 128]
  2818. # qhasm: h0[0,1] += mix[0] signed* g13_19[0];h0[2,3] += mix[1] signed* g13_19[1]
  2819. # asm 1: vmlal.s32 <h0=reg128#1,<mix=reg128#16%bot,<g13_19=reg128#5%bot
  2820. # asm 2: vmlal.s32 <h0=q0,<mix=d30,<g13_19=d8
  2821. vmlal.s32 q0,d30,d8
  2822. # qhasm: ptr = &h9_stack
  2823. # asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
  2824. # asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
  2825. add r2,sp,#672
  2826. # qhasm: h9 aligned= mem128[ptr]
  2827. # asm 1: vld1.8 {>h9=reg128#11%bot->h9=reg128#11%top},[<ptr=int32#3,: 128]
  2828. # asm 2: vld1.8 {>h9=d20->h9=d21},[<ptr=r2,: 128]
  2829. vld1.8 {d20-d21},[r2,: 128]
  2830. # qhasm: h6[0,1] += mix[0] signed* g57_19[2];h6[2,3] += mix[1] signed* g57_19[3]
  2831. # asm 1: vmlal.s32 <h6=reg128#6,<mix=reg128#16%bot,<g57_19=reg128#15%top
  2832. # asm 2: vmlal.s32 <h6=q5,<mix=d30,<g57_19=d29
  2833. vmlal.s32 q5,d30,d29
  2834. # qhasm: ptr = &h5_stack
  2835. # asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
  2836. # asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
  2837. add r2,sp,#608
  2838. # qhasm: h5 aligned= mem128[ptr]
  2839. # asm 1: vld1.8 {>h5=reg128#13%bot->h5=reg128#13%top},[<ptr=int32#3,: 128]
  2840. # asm 2: vld1.8 {>h5=d24->h5=d25},[<ptr=r2,: 128]
  2841. vld1.8 {d24-d25},[r2,: 128]
  2842. # qhasm: h4[0,1] += mix[0] signed* g57_19[0];h4[2,3] += mix[1] signed* g57_19[1]
  2843. # asm 1: vmlal.s32 <h4=reg128#2,<mix=reg128#16%bot,<g57_19=reg128#15%bot
  2844. # asm 2: vmlal.s32 <h4=q1,<mix=d30,<g57_19=d28
  2845. vmlal.s32 q1,d30,d28
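# note (added): carry/reduce pass over the two accumulated results.
# Limbs alternate 26 and 25 bits; adding 2^25 (_0x2000000) before the
# arithmetic shift right by 26 rounds the carry to nearest, and adding
# 2^24 (_0x1000000) before the shift by 25 does the same for the
# 25-bit limbs. Each carry is added into the next limb and subtracted
# (as c << 26 or c << 25) from the current one.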
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#14,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q13,<h0=q0,<_0x2000000=q11
vadd.i64 q13,q0,q11
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#15,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q14,<h6=q5,<_0x2000000=q11
vadd.i64 q14,q5,q11
# qhasm: h2[0,1] += mix[0] signed* g13_19[2];h2[2,3] += mix[1] signed* g13_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<mix=reg128#16%bot,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h2=q6,<mix=d30,<g13_19=d9
vmlal.s32 q6,d30,d9
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#5,<t0=reg128#14,#26
# asm 2: vshr.s64 >c0=q4,<t0=q13,#26
vshr.s64 q4,q13,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#14,<t6=reg128#15,#26
# asm 2: vshr.s64 >c6=q13,<t6=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#8,<h1=reg128#8,<c0=reg128#5
# asm 2: vadd.i64 >h1=q7,<h1=q7,<c0=q4
vadd.i64 q7,q7,q4
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#5,#26
# asm 2: vshl.i64 >t0=q4,<c0=q4,#26
vshl.i64 q4,q4,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#15,<h1=reg128#8,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t1=q14,<h1=q7,<_0x1000000=q3
vadd.i64 q14,q7,q3
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#10,<h7=reg128#10,<c6=reg128#14
# asm 2: vadd.i64 >h7=q9,<h7=q9,<c6=q13
vadd.i64 q9,q9,q13
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#14,<c6=reg128#14,#26
# asm 2: vshl.i64 >t6=q13,<c6=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#16,<h7=reg128#10,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t7=q15,<h7=q9,<_0x1000000=q3
vadd.i64 q15,q9,q3
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#5,<t1=reg128#15,#25
# asm 2: vshr.s64 >c1=q4,<t1=q14,#25
vshr.s64 q4,q14,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#14
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q13
vsub.i64 q5,q5,q13
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#14,<t7=reg128#16,#25
# asm 2: vshr.s64 >c7=q13,<t7=q15,#25
vshr.s64 q13,q15,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#7,<h2=reg128#7,<c1=reg128#5
# asm 2: vadd.i64 >h2=q6,<h2=q6,<c1=q4
vadd.i64 q6,q6,q4
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#5,<c1=reg128#5,#25
# asm 2: vshl.i64 >t1=q4,<c1=q4,#25
vshl.i64 q4,q4,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#15,<h2=reg128#7,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t2=q14,<h2=q6,<_0x2000000=q11
vadd.i64 q14,q6,q11
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#3,<h8=reg128#3,<c7=reg128#14
# asm 2: vadd.i64 >h8=q2,<h8=q2,<c7=q13
vadd.i64 q2,q2,q13
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#5,<h1=reg128#8,<t1=reg128#5
# asm 2: vsub.i64 >h1=q4,<h1=q7,<t1=q4
vsub.i64 q4,q7,q4
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#8,<t2=reg128#15,#26
# asm 2: vshr.s64 >c2=q7,<t2=q14,#26
vshr.s64 q7,q14,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#14,<c7=reg128#14,#25
# asm 2: vshl.i64 >t7=q13,<c7=q13,#25
vshl.i64 q13,q13,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#15,<h8=reg128#3,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t8=q14,<h8=q2,<_0x2000000=q11
vadd.i64 q14,q2,q11
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#9,<h3=reg128#9,<c2=reg128#8
# asm 2: vadd.i64 >h3=q8,<h3=q8,<c2=q7
vadd.i64 q8,q8,q7
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#8,<c2=reg128#8,#26
# asm 2: vshl.i64 >t2=q7,<c2=q7,#26
vshl.i64 q7,q7,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#9,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t3=q15,<h3=q8,<_0x1000000=q3
vadd.i64 q15,q8,q3
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#10,<h7=reg128#10,<t7=reg128#14
# asm 2: vsub.i64 >h7=q9,<h7=q9,<t7=q13
vsub.i64 q9,q9,q13
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#14,<t8=reg128#15,#26
# asm 2: vshr.s64 >c8=q13,<t8=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#7,<h2=reg128#7,<t2=reg128#8
# asm 2: vsub.i64 >h2=q6,<h2=q6,<t2=q7
vsub.i64 q6,q6,q7
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#8,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q7,<t3=q15,#25
vshr.s64 q7,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#11,<h9=reg128#11,<c8=reg128#14
# asm 2: vadd.i64 >h9=q10,<h9=q10,<c8=q13
vadd.i64 q10,q10,q13
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#14,<c8=reg128#14,#26
# asm 2: vshl.i64 >t8=q13,<c8=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#15,<h9=reg128#11,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t9=q14,<h9=q10,<_0x1000000=q3
vadd.i64 q14,q10,q3
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#2,<h4=reg128#2,<c3=reg128#8
# asm 2: vadd.i64 >h4=q1,<h4=q1,<c3=q7
vadd.i64 q1,q1,q7
# qhasm: posh = playground1_ptr + 144
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >posh=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#8,<c3=reg128#8,#25
# asm 2: vshl.i64 >t3=q7,<c3=q7,#25
vshl.i64 q7,q7,#25
# qhasm: posH = playground1_ptr + 96
# asm 1: add >posH=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >posH=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#2,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t4=q15,<h4=q1,<_0x2000000=q11
vadd.i64 q15,q1,q11
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#3,<h8=reg128#3,<t8=reg128#14
# asm 2: vsub.i64 >h8=q2,<h8=q2,<t8=q13
vsub.i64 q2,q2,q13
# qhasm: posH+=8
# asm 1: add >posH=int32#5,<posH=int32#5,#8
# asm 2: add >posH=r4,<posH=r4,#8
add r4,r4,#8
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#14,<t9=reg128#15,#25
# asm 2: vshr.s64 >c9=q13,<t9=q14,#25
vshr.s64 q13,q14,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#8,<h3=reg128#9,<t3=reg128#8
# asm 2: vsub.i64 >h3=q7,<h3=q8,<t3=q7
vsub.i64 q7,q8,q7
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#9,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q8,<t4=q15,#26
vshr.s64 q8,q15,#26
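# note (added): the carry out of the top limb wraps into h0 with a
# factor of 19 (2^255 = 19 mod 2^255-19), assembled as 19 = 2+16+1:
# h0 += (c9+c9), h0 += (c9<<4), h0 += c9, interleaved with the
# remaining carries below.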
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#15,<c9=reg128#14,<c9=reg128#14
# asm 2: vadd.i64 >s=q14,<c9=q13,<c9=q13
vadd.i64 q14,q13,q13
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#13,<h5=reg128#13,<c4=reg128#9
# asm 2: vadd.i64 >h5=q12,<h5=q12,<c4=q8
vadd.i64 q12,q12,q8
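# note (added): the vtrn.32/vst1.8 sequence below de-interleaves the
# two results computed in parallel (SIMD lanes 0 and 1 of each 64-bit
# pair) and writes one to posh and the other to posH.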
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%bot,<h3=reg128#8%bot
# asm 2: vtrn.32 <h2=d12,<h3=d14
vtrn.32 d12,d14
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#9,<c4=reg128#9,#26
# asm 2: vshl.i64 >t4=q8,<c4=q8,#26
vshl.i64 q8,q8,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%top,<h3=reg128#8%top
# asm 2: vtrn.32 <h2=d13,<h3=d15
vtrn.32 d13,d15
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#4,<h5=reg128#13,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t5=q3,<h5=q12,<_0x1000000=q3
vadd.i64 q3,q12,q3
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#15
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q14
vadd.i64 q0,q0,q14
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#7%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d12,[<posh=r2,: 64]!
vst1.8 d12,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#8,<c9=reg128#14,#4
# asm 2: vshl.i64 >s=q7,<c9=q13,#4
vshl.i64 q7,q13,#4
# qhasm: mem64[posH] aligned= h2[1];posH+=8
# asm 1: vst1.8 <h2=reg128#7%top,[<posH=int32#5,: 64]!
# asm 2: vst1.8 <h2=d13,[<posH=r4,: 64]!
vst1.8 d13,[r4,: 64]!
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#2,<h4=reg128#2,<t4=reg128#9
# asm 2: vsub.i64 >h4=q1,<h4=q1,<t4=q8
vsub.i64 q1,q1,q8
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#4,<t5=reg128#4,#25
# asm 2: vshr.s64 >c5=q3,<t5=q3,#25
vshr.s64 q3,q3,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#8
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q7
vadd.i64 q0,q0,q7
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#6,<h6=reg128#6,<c5=reg128#4
# asm 2: vadd.i64 >h6=q5,<h6=q5,<c5=q3
vadd.i64 q5,q5,q3
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#4,<c5=reg128#4,#25
# asm 2: vshl.i64 >t5=q3,<c5=q3,#25
vshl.i64 q3,q3,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q6,<h6=q5,<_0x2000000=q11
vadd.i64 q6,q5,q11
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<c9=reg128#14
# asm 2: vadd.i64 >h0=q0,<h0=q0,<c9=q13
vadd.i64 q0,q0,q13
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#8,<c9=reg128#14,#25
# asm 2: vshl.i64 >t9=q7,<c9=q13,#25
vshl.i64 q7,q13,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#9,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q8,<h0=q0,<_0x2000000=q11
vadd.i64 q8,q0,q11
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#4,<h5=reg128#13,<t5=reg128#4
# asm 2: vsub.i64 >h5=q3,<h5=q12,<t5=q3
vsub.i64 q3,q12,q3
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#8,<h9=reg128#11,<t9=reg128#8
# asm 2: vsub.i64 >h9=q7,<h9=q10,<t9=q7
vsub.i64 q7,q10,q7
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%bot,<h5=reg128#4%bot
# asm 2: vtrn.32 <h4=d2,<h5=d6
vtrn.32 d2,d6
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#9,<t0=reg128#9,#26
# asm 2: vshr.s64 >c0=q8,<t0=q8,#26
vshr.s64 q8,q8,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%top,<h5=reg128#4%top
# asm 2: vtrn.32 <h4=d3,<h5=d7
vtrn.32 d3,d7
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#4,<h7=reg128#10,<c6=reg128#7
# asm 2: vadd.i64 >h7=q3,<h7=q9,<c6=q6
vadd.i64 q3,q9,q6
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#2%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d2,[<posh=r2,: 64]
vst1.8 d2,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#7,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q6,<c6=q6,#26
vshl.i64 q6,q6,#26
# qhasm: mem64[posH] aligned= h4[1]
# asm 1: vst1.8 <h4=reg128#2%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h4=d3,[<posH=r4,: 64]
vst1.8 d3,[r4,: 64]
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#2,<h1=reg128#5,<c0=reg128#9
# asm 2: vadd.i64 >h1=q1,<h1=q4,<c0=q8
vadd.i64 q1,q4,q8
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%bot,<h9=reg128#8%bot
# asm 2: vtrn.32 <h8=d4,<h9=d14
vtrn.32 d4,d14
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#9,#26
# asm 2: vshl.i64 >t0=q4,<c0=q8,#26
vshl.i64 q4,q8,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%top,<h9=reg128#8%top
# asm 2: vtrn.32 <h8=d5,<h9=d15
vtrn.32 d5,d15
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#7
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q6
vsub.i64 q5,q5,q6
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: posH+=16
# asm 1: add >posH=int32#5,<posH=int32#5,#16
# asm 2: add >posH=r4,<posH=r4,#16
add r4,r4,#16
# qhasm: mem64[posH] aligned= h8[1]
# asm 1: vst1.8 <h8=reg128#3%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h8=d5,[<posH=r4,: 64]
vst1.8 d5,[r4,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%bot,<h7=reg128#4%bot
# asm 2: vtrn.32 <h6=d10,<h7=d6
vtrn.32 d10,d6
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%top,<h7=reg128#4%top
# asm 2: vtrn.32 <h6=d11,<h7=d7
vtrn.32 d11,d7
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: posH-=8
# asm 1: sub >posH=int32#5,<posH=int32#5,#8
# asm 2: sub >posH=r4,<posH=r4,#8
sub r4,r4,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%bot,<h1=reg128#2%bot
# asm 2: vtrn.32 <h0=d0,<h1=d2
vtrn.32 d0,d2
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%top,<h1=reg128#2%top
# asm 2: vtrn.32 <h0=d1,<h1=d3
vtrn.32 d1,d3
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#6%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d10,[<posh=r2,: 64]
vst1.8 d10,[r2,: 64]
# qhasm: mem64[posH] aligned= h6[1]
# asm 1: vst1.8 <h6=reg128#6%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h6=d11,[<posH=r4,: 64]
vst1.8 d11,[r4,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: posH-=24
# asm 1: sub >posH=int32#5,<posH=int32#5,#24
# asm 2: sub >posH=r4,<posH=r4,#24
sub r4,r4,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: mem64[posH] aligned= h0[1]
# asm 1: vst1.8 <h0=reg128#1%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h0=d1,[<posH=r4,: 64]
vst1.8 d1,[r4,: 64]
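# note (added): next, a limb-wise subtraction f -= g over 10 limbs
# (two 128-bit vectors plus one 64-bit tail). No addition of a
# multiple of p appears here; the limbs are signed, and the signed
# representation appears intended to absorb negative intermediate
# values until the next carry pass.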
# qhasm: pos1 = playground1_ptr + 288
# asm 1: add >pos1=int32#3,<playground1_ptr=int32#4,#288
# asm 2: add >pos1=r2,<playground1_ptr=r3,#288
add r2,r3,#288
# qhasm: pos2 = playground1_ptr + 336
# asm 1: add >pos2=int32#5,<playground1_ptr=int32#4,#336
# asm 2: add >pos2=r4,<playground1_ptr=r3,#336
add r4,r3,#336
# qhasm: f0 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<pos1=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: g0 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g0=reg128#2%bot->g0=reg128#2%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g0=d2->g0=d3},[<pos2=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: 4x f0 -= g0
# asm 1: vsub.i32 >f0=reg128#1,<f0=reg128#1,<g0=reg128#2
# asm 2: vsub.i32 >f0=q0,<f0=q0,<g0=q1
vsub.i32 q0,q0,q1
# qhasm: f4 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<pos1=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: g4 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g4=reg128#3%bot->g4=reg128#3%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g4=d4->g4=d5},[<pos2=r4,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
# qhasm: 4x f4 -= g4
# asm 1: vsub.i32 >f4=reg128#2,<f4=reg128#2,<g4=reg128#3
# asm 2: vsub.i32 >f4=q1,<f4=q1,<g4=q2
vsub.i32 q1,q1,q2
# qhasm: pos0 = playground1_ptr + 240
# asm 1: add >pos0=int32#6,<playground1_ptr=int32#4,#240
# asm 2: add >pos0=r5,<playground1_ptr=r3,#240
add r5,r3,#240
# qhasm: new f8
# qhasm: new g8
# qhasm: f8 aligned= mem64[pos1] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<pos1=int32#3,: 64]
# asm 2: vld1.8 {<f8=d4},[<pos1=r2,: 64]
vld1.8 {d4},[r2,: 64]
# qhasm: g8 aligned= mem64[pos2] g8[1]
# asm 1: vld1.8 {<g8=reg128#4%bot},[<pos2=int32#5,: 64]
# asm 2: vld1.8 {<g8=d6},[<pos2=r4,: 64]
vld1.8 {d6},[r4,: 64]
# qhasm: 4x f8 -= g8
# asm 1: vsub.i32 >f8=reg128#3,<f8=reg128#3,<g8=reg128#4
# asm 2: vsub.i32 >f8=q2,<f8=q2,<g8=q3
vsub.i32 q2,q2,q3
# qhasm: mem128[pos0] aligned= f0;pos0 += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<pos0=int32#6,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<pos0=r5,: 128]!
vst1.8 {d0-d1},[r5,: 128]!
# qhasm: mem128[pos0] aligned= f4;pos0 += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<pos0=int32#6,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<pos0=r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
# qhasm: mem64[pos0] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<pos0=int32#6,: 64]
# asm 2: vst1.8 <f8=d4,[<pos0=r5,: 64]
vst1.8 d4,[r5,: 64]
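# note (added): the block below computes both the sum and the
# difference of two field elements in one pass (d = f - g, f += g),
# the usual add/sub pairing of a Montgomery-ladder step.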
# qhasm: pos1 = playground1_ptr + 144
# asm 1: add >pos1=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >pos1=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: pos2 = playground1_ptr + 96
# asm 1: add >pos2=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >pos2=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: pos3 = playground1_ptr + 144
# asm 1: add >pos3=int32#6,<playground1_ptr=int32#4,#144
# asm 2: add >pos3=r5,<playground1_ptr=r3,#144
add r5,r3,#144
# qhasm: pos0 = playground1_ptr + 192
# asm 1: add >pos0=int32#7,<playground1_ptr=int32#4,#192
# asm 2: add >pos0=r6,<playground1_ptr=r3,#192
add r6,r3,#192
# qhasm: f0 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<pos1=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: g0 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g0=reg128#2%bot->g0=reg128#2%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g0=d2->g0=d3},[<pos2=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: 4x d0 = f0 - g0
# asm 1: vsub.i32 >d0=reg128#3,<f0=reg128#1,<g0=reg128#2
# asm 2: vsub.i32 >d0=q2,<f0=q0,<g0=q1
vsub.i32 q2,q0,q1
# qhasm: 4x f0 += g0
# asm 1: vadd.i32 >f0=reg128#1,<f0=reg128#1,<g0=reg128#2
# asm 2: vadd.i32 >f0=q0,<f0=q0,<g0=q1
vadd.i32 q0,q0,q1
# qhasm: f4 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<pos1=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: g4 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g4=reg128#4%bot->g4=reg128#4%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g4=d6->g4=d7},[<pos2=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: 4x d4 = f4 - g4
# asm 1: vsub.i32 >d4=reg128#5,<f4=reg128#2,<g4=reg128#4
# asm 2: vsub.i32 >d4=q4,<f4=q1,<g4=q3
vsub.i32 q4,q1,q3
# qhasm: 4x f4 += g4
# asm 1: vadd.i32 >f4=reg128#2,<f4=reg128#2,<g4=reg128#4
# asm 2: vadd.i32 >f4=q1,<f4=q1,<g4=q3
vadd.i32 q1,q1,q3
# qhasm: new f8
# qhasm: f8 aligned= mem64[pos1] f8[1]
# asm 1: vld1.8 {<f8=reg128#4%bot},[<pos1=int32#3,: 64]
# asm 2: vld1.8 {<f8=d6},[<pos1=r2,: 64]
vld1.8 {d6},[r2,: 64]
# qhasm: new g8
# qhasm: g8 aligned= mem64[pos2] g8[1]
# asm 1: vld1.8 {<g8=reg128#6%bot},[<pos2=int32#5,: 64]
# asm 2: vld1.8 {<g8=d10},[<pos2=r4,: 64]
vld1.8 {d10},[r4,: 64]
# qhasm: 4x d8 = f8 - g8
# asm 1: vsub.i32 >d8=reg128#7,<f8=reg128#4,<g8=reg128#6
# asm 2: vsub.i32 >d8=q6,<f8=q3,<g8=q5
vsub.i32 q6,q3,q5
# qhasm: 4x f8 += g8
# asm 1: vadd.i32 >f8=reg128#4,<f8=reg128#4,<g8=reg128#6
# asm 2: vadd.i32 >f8=q3,<f8=q3,<g8=q5
vadd.i32 q3,q3,q5
# qhasm: mem128[pos3] aligned= d0;pos3 += 16
# asm 1: vst1.8 {<d0=reg128#3%bot-<d0=reg128#3%top},[<pos3=int32#6,: 128]!
# asm 2: vst1.8 {<d0=d4-<d0=d5},[<pos3=r5,: 128]!
vst1.8 {d4-d5},[r5,: 128]!
# qhasm: mem128[pos0] aligned= f0;pos0 += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<pos0=int32#7,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<pos0=r6,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
# qhasm: mem128[pos3] aligned= d4;pos3 += 16
# asm 1: vst1.8 {<d4=reg128#5%bot-<d4=reg128#5%top},[<pos3=int32#6,: 128]!
# asm 2: vst1.8 {<d4=d8-<d4=d9},[<pos3=r5,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
# qhasm: mem128[pos0] aligned= f4;pos0 += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<pos0=int32#7,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<pos0=r6,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
# qhasm: mem64[pos3] aligned= d8[0]
# asm 1: vst1.8 <d8=reg128#7%bot,[<pos3=int32#6,: 64]
# asm 2: vst1.8 <d8=d12,[<pos3=r5,: 64]
vst1.8 d12,[r5,: 64]
# qhasm: mem64[pos0] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#4%bot,[<pos0=int32#7,: 64]
# asm 2: vst1.8 <f8=d6,[<pos0=r6,: 64]
vst1.8 d6,[r6,: 64]
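# note (added): setup for the next pair of multiplications. Limbs of
# two independent operands are interleaved with vtrn.32 so that each
# 64-bit half holds the same limb index from both inputs (2-way SIMD),
# and the 19*g limbs for the wrap-around terms are built as
# (g << 4) + g + g + g = 19*g, spread between the loads.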
# qhasm: posf = playground1_ptr + 0
# asm 1: add >posf=int32#3,<playground1_ptr=int32#4,#0
# asm 2: add >posf=r2,<playground1_ptr=r3,#0
add r2,r3,#0
# qhasm: posg = playground1_ptr + 240
# asm 1: add >posg=int32#5,<playground1_ptr=int32#4,#240
# asm 2: add >posg=r4,<playground1_ptr=r3,#240
add r4,r3,#240
# qhasm: g02 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g02=reg128#1%bot->g02=reg128#1%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g02=d0->g02=d1},[<posg=r4,: 128]!
vld1.8 {d0-d1},[r4,: 128]!
# qhasm: g46 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g46=reg128#2%bot->g46=reg128#2%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g46=d2->g46=d3},[<posg=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: new g89
# qhasm: g89 aligned= mem64[posg] g89[1]
# asm 1: vld1.8 {<g89=reg128#3%bot},[<posg=int32#5,: 64]
# asm 2: vld1.8 {<g89=d4},[<posg=r4,: 64]
vld1.8 {d4},[r4,: 64]
# qhasm: posG = playground1_ptr + 336
# asm 1: add >posG=int32#5,<playground1_ptr=int32#4,#336
# asm 2: add >posG=r4,<playground1_ptr=r3,#336
add r4,r3,#336
# qhasm: g13 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g13=reg128#4%bot->g13=reg128#4%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g13=d6->g13=d7},[<posG=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: g02 g13 = g02[0]g13[0] g02[2]g13[2] g02[1]g13[1] g02[3]g13[3]
# asm 1: vtrn.32 <g02=reg128#1,<g13=reg128#4
# asm 2: vtrn.32 <g02=q0,<g13=q3
vtrn.32 q0,q3
# qhasm: g57 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g57=reg128#5%bot->g57=reg128#5%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g57=d8->g57=d9},[<posG=r4,: 128]!
vld1.8 {d8-d9},[r4,: 128]!
# qhasm: 4x mix = g02 << 4
# asm 1: vshl.i32 >mix=reg128#6,<g02=reg128#1,#4
# asm 2: vshl.i32 >mix=q5,<g02=q0,#4
vshl.i32 q5,q0,#4
# qhasm: g46 g57 = g46[0]g57[0] g46[2]g57[2] g46[1]g57[1] g46[3]g57[3]
# asm 1: vtrn.32 <g46=reg128#2,<g57=reg128#5
# asm 2: vtrn.32 <g46=q1,<g57=q4
vtrn.32 q1,q4
# qhasm: 4x g13_19 = g13 << 4
# asm 1: vshl.i32 >g13_19=reg128#7,<g13=reg128#4,#4
# asm 2: vshl.i32 >g13_19=q6,<g13=q3,#4
vshl.i32 q6,q3,#4
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: 4x g46_19 = g46 << 4
# asm 1: vshl.i32 >g46_19=reg128#8,<g46=reg128#2,#4
# asm 2: vshl.i32 >g46_19=q7,<g46=q1,#4
vshl.i32 q7,q1,#4
# qhasm: g89 aligned= g89[0] mem64[posG]
# asm 1: vld1.8 {<g89=reg128#3%top},[<posG=int32#5,: 64]
# asm 2: vld1.8 {<g89=d5},[<posG=r4,: 64]
vld1.8 {d5},[r4,: 64]
# qhasm: 4x g57_19 = g57 << 4
# asm 1: vshl.i32 >g57_19=reg128#9,<g57=reg128#5,#4
# asm 2: vshl.i32 >g57_19=q8,<g57=q4,#4
vshl.i32 q8,q4,#4
# qhasm: g89 = g89[0] g89[2] g89[1] g89[3]
# asm 1: vtrn.32 <g89=reg128#3%bot,<g89=reg128#3%top
# asm 2: vtrn.32 <g89=d4,<g89=d5
vtrn.32 d4,d5
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f02 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f02=reg128#10%bot->f02=reg128#10%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f02=d18->f02=d19},[<posf=r2,: 128]!
vld1.8 {d18-d19},[r2,: 128]!
# qhasm: 4x g89_19 = g89 << 4
# asm 1: vshl.i32 >g89_19=reg128#11,<g89=reg128#3,#4
# asm 2: vshl.i32 >g89_19=q10,<g89=q2,#4
vshl.i32 q10,q2,#4
# qhasm: f46 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f46=reg128#12%bot->f46=reg128#12%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f46=d22->f46=d23},[<posf=r2,: 128]!
vld1.8 {d22-d23},[r2,: 128]!
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: new f89
# qhasm: f89 aligned= mem64[posf] f89[1]
# asm 1: vld1.8 {<f89=reg128#13%bot},[<posf=int32#3,: 64]
# asm 2: vld1.8 {<f89=d24},[<posf=r2,: 64]
vld1.8 {d24},[r2,: 64]
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: posF = playground1_ptr + 288
# asm 1: add >posF=int32#3,<playground1_ptr=int32#4,#288
# asm 2: add >posF=r2,<playground1_ptr=r3,#288
add r2,r3,#288
# qhasm: f13 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f13=reg128#14%bot->f13=reg128#14%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f13=d26->f13=d27},[<posF=r2,: 128]!
vld1.8 {d26-d27},[r2,: 128]!
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: f57 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f57=reg128#15%bot->f57=reg128#15%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f57=d28->f57=d29},[<posF=r2,: 128]!
vld1.8 {d28-d29},[r2,: 128]!
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f89 aligned= f89[0] mem64[posF]
# asm 1: vld1.8 {<f89=reg128#13%top},[<posF=int32#3,: 64]
# asm 2: vld1.8 {<f89=d25},[<posF=r2,: 64]
vld1.8 {d25},[r2,: 64]
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f02 f13 = f02[0]f13[0] f02[2]f13[2] f02[1]f13[1] f02[3]f13[3]
# asm 1: vtrn.32 <f02=reg128#10,<f13=reg128#14
# asm 2: vtrn.32 <f02=q9,<f13=q13
vtrn.32 q9,q13
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: f46 f57 = f46[0]f57[0] f46[2]f57[2] f46[1]f57[1] f46[3]f57[3]
# asm 1: vtrn.32 <f46=reg128#12,<f57=reg128#15
# asm 2: vtrn.32 <f46=q11,<f57=q14
vtrn.32 q11,q14
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: new g13_19_stack
# qhasm: ptr = &g13_19_stack
# asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
# asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
add r2,sp,#560
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f89 = f89[0] f89[2] f89[1] f89[3]
# asm 1: vtrn.32 <f89=reg128#13%bot,<f89=reg128#13%top
# asm 2: vtrn.32 <f89=d24,<f89=d25
vtrn.32 d24,d25
# qhasm: mem128[ptr] aligned= g13_19
# asm 1: vst1.8 {<g13_19=reg128#7%bot-<g13_19=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g13_19=d12-<g13_19=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
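# note (added): only q0-q15 are available, so precomputed operands
# that do not fit in registers are spilled to aligned 128-bit stack
# slots here and reloaded later where they are consumed.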
# qhasm: 4x f13_2 = f13 << 1
# asm 1: vshl.i32 >f13_2=reg128#7,<f13=reg128#14,#1
# asm 2: vshl.i32 >f13_2=q6,<f13=q13,#1
vshl.i32 q6,q13,#1
# qhasm: new g89_19_stack
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= g89_19
# asm 1: vst1.8 {<g89_19=reg128#11%bot-<g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g89_19=d20-<g89_19=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: 4x f57_2 = f57 << 1
# asm 1: vshl.i32 >f57_2=reg128#11,<f57=reg128#15,#1
# asm 2: vshl.i32 >f57_2=q10,<f57=q14,#1
vshl.i32 q10,q14,#1
# qhasm: new f13_2_stack
# qhasm: ptr = &f13_2_stack
# asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
# asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
add r2,sp,#592
# qhasm: mem128[ptr] aligned= f13_2
# asm 1: vst1.8 {<f13_2=reg128#7%bot-<f13_2=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f13_2=d12-<f13_2=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
# qhasm: 4x f89_2 = f89 << 1
# asm 1: vshl.i32 >f89_2=reg128#16,<f89=reg128#13,#1
# asm 2: vshl.i32 >f89_2=q15,<f89=q12,#1
vshl.i32 q15,q12,#1
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: mix = f89_2[2,3] mix[2,3]
# asm 1: vext.32 <mix=reg128#6%bot,<f89_2=reg128#16%top,<f89_2=reg128#16%bot,#0
# asm 2: vext.32 <mix=d10,<f89_2=d31,<f89_2=d30,#0
vext.32 d10,d31,d30,#0
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: new g57_19_stack
# qhasm: ptr = &g57_19_stack
# asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
# asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
add r2,sp,#608
# qhasm: mem128[ptr] aligned= g57_19
# asm 1: vst1.8 {<g57_19=reg128#9%bot-<g57_19=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g57_19=d16-<g57_19=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
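# note (added): product accumulation begins with the top limb:
# h9 = sum of f[i]*g[j] over i+j = 9, which needs no factor of 19;
# the wrapped columns (i+j >= 10) feeding h0..h8 use the *_19
# operands spilled above.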
# qhasm: h9[0,1] = f02[0] signed* g89[2];h9[2,3] = f02[1] signed* g89[3]
# asm 1: vmull.s32 >h9=reg128#9,<f02=reg128#10%bot,<g89=reg128#3%top
# asm 2: vmull.s32 >h9=q8,<f02=d18,<g89=d5
vmull.s32 q8,d18,d5
# qhasm: h9[0,1] += f13[0] signed* g89[0];h9[2,3] += f13[1] signed* g89[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%bot,<g89=reg128#3%bot
# asm 2: vmlal.s32 <h9=q8,<f13=d26,<g89=d4
vmlal.s32 q8,d26,d4
# qhasm: h9[0,1] += f02[2] signed* g57[2];h9[2,3] += f02[3] signed* g57[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f02=reg128#10%top,<g57=reg128#5%top
# asm 2: vmlal.s32 <h9=q8,<f02=d19,<g57=d9
vmlal.s32 q8,d19,d9
# qhasm: h9[0,1] += f13[2] signed* g46[2];h9[2,3] += f13[3] signed* g46[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h9=q8,<f13=d27,<g46=d3
vmlal.s32 q8,d27,d3
# qhasm: h9[0,1] += f46[0] signed* g57[0];h9[2,3] += f46[1] signed* g57[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h9=q8,<f46=d22,<g57=d8
vmlal.s32 q8,d22,d8
# qhasm: h9[0,1] += f57[0] signed* g46[0];h9[2,3] += f57[1] signed* g46[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h9=q8,<f57=d28,<g46=d2
vmlal.s32 q8,d28,d2
# qhasm: h9[0,1] += f46[2] signed* g13[2];h9[2,3] += f46[3] signed* g13[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h9=q8,<f46=d23,<g13=d7
vmlal.s32 q8,d23,d7
# qhasm: h9[0,1] += f57[2] signed* g02[2];h9[2,3] += f57[3] signed* g02[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h9=q8,<f57=d29,<g02=d1
vmlal.s32 q8,d29,d1
# qhasm: h9[0,1] += f89[0] signed* g13[0];h9[2,3] += f89[1] signed* g13[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d24,<g13=d6
vmlal.s32 q8,d24,d6
# qhasm: h9[0,1] += f89[2] signed* g02[0];h9[2,3] += f89[3] signed* g02[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d25,<g02=d0
vmlal.s32 q8,d25,d0
# qhasm: new g46_19_stack
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: mem128[ptr] aligned= g46_19
# asm 1: vst1.8 {<g46_19=reg128#8%bot-<g46_19=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g46_19=d14-<g46_19=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h8[0,1] = f02[0] signed* g89[0];h8[2,3] = f02[1] signed* g89[1]
# asm 1: vmull.s32 >h8=reg128#3,<f02=reg128#10%bot,<g89=reg128#3%bot
# asm 2: vmull.s32 >h8=q2,<f02=d18,<g89=d4
vmull.s32 q2,d18,d4
# qhasm: h8[0,1] += f13_2[0] signed* g57[2];h8[2,3] += f13_2[1] signed* g57[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%bot,<g57=reg128#5%top
# asm 2: vmlal.s32 <h8=q2,<f13_2=d12,<g57=d9
vmlal.s32 q2,d12,d9
# qhasm: h8[0,1] += f13_2[2] signed* g57[0];h8[2,3] += f13_2[3] signed* g57[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h8=q2,<f13_2=d13,<g57=d8
vmlal.s32 q2,d13,d8
# qhasm: h8[0,1] += f02[2] signed* g46[2];h8[2,3] += f02[3] signed* g46[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f02=reg128#10%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h8=q2,<f02=d19,<g46=d3
vmlal.s32 q2,d19,d3
# qhasm: h8[0,1] += f46[0] signed* g46[0];h8[2,3] += f46[1] signed* g46[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h8=q2,<f46=d22,<g46=d2
vmlal.s32 q2,d22,d2
# qhasm: h8[0,1] += f46[2] signed* g02[2];h8[2,3] += f46[3] signed* g02[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h8=q2,<f46=d23,<g02=d1
vmlal.s32 q2,d23,d1
# qhasm: h8[0,1] += f89[0] signed* g02[0];h8[2,3] += f89[1] signed* g02[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f89=reg128#13%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h8=q2,<f89=d24,<g02=d0
vmlal.s32 q2,d24,d0
# qhasm: new f57_2_stack
# qhasm: ptr = &f57_2_stack
# asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
# asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
add r2,sp,#640
# qhasm: mem128[ptr] aligned= f57_2
# asm 1: vst1.8 {<f57_2=reg128#11%bot-<f57_2=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f57_2=d20-<f57_2=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] = f02[0] signed* g57[2];h7[2,3] = f02[1] signed* g57[3]
# asm 1: vmull.s32 >h7=reg128#8,<f02=reg128#10%bot,<g57=reg128#5%top
# asm 2: vmull.s32 >h7=q7,<f02=d18,<g57=d9
vmull.s32 q7,d18,d9
# qhasm: h7[0,1] += f13[0] signed* g46[2];h7[2,3] += f13[1] signed* g46[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%bot,<g46=reg128#2%top
# asm 2: vmlal.s32 <h7=q7,<f13=d26,<g46=d3
vmlal.s32 q7,d26,d3
# qhasm: h7[0,1] += f02[2] signed* g57[0];h7[2,3] += f02[3] signed* g57[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f02=reg128#10%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h7=q7,<f02=d19,<g57=d8
vmlal.s32 q7,d19,d8
# qhasm: h7[0,1] += f13[2] signed* g46[0];h7[2,3] += f13[3] signed* g46[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h7=q7,<f13=d27,<g46=d2
vmlal.s32 q7,d27,d2
# qhasm: h7[0,1] += f46[0] signed* g13[2];h7[2,3] += f46[1] signed* g13[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h7=q7,<f46=d22,<g13=d7
vmlal.s32 q7,d22,d7
# qhasm: h7[0,1] += f57[0] signed* g02[2];h7[2,3] += f57[1] signed* g02[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h7=q7,<f57=d28,<g02=d1
vmlal.s32 q7,d28,d1
# qhasm: h7[0,1] += f46[2] signed* g13[0];h7[2,3] += f46[3] signed* g13[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h7=q7,<f46=d23,<g13=d6
vmlal.s32 q7,d23,d6
# qhasm: h7[0,1] += f57[2] signed* g02[0];h7[2,3] += f57[3] signed* g02[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h7=q7,<f57=d29,<g02=d0
vmlal.s32 q7,d29,d0
# qhasm: new mix_stack
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mem128[ptr] aligned= mix
# asm 1: vst1.8 {<mix=reg128#6%bot-<mix=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<mix=d10-<mix=d11},[<ptr=r2,: 128]
vst1.8 {d10-d11},[r2,: 128]
# qhasm: h6[0,1] = f02[0] signed* g46[2];h6[2,3] = f02[1] signed* g46[3]
# asm 1: vmull.s32 >h6=reg128#6,<f02=reg128#10%bot,<g46=reg128#2%top
# asm 2: vmull.s32 >h6=q5,<f02=d18,<g46=d3
vmull.s32 q5,d18,d3
# qhasm: h6[0,1] += f02[2] signed* g46[0];h6[2,3] += f02[3] signed* g46[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f02=reg128#10%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h6=q5,<f02=d19,<g46=d2
vmlal.s32 q5,d19,d2
# qhasm: h6[0,1] += f46[0] signed* g02[2];h6[2,3] += f46[1] signed* g02[3]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h6=q5,<f46=d22,<g02=d1
vmlal.s32 q5,d22,d1
# qhasm: h6[0,1] += f46[2] signed* g02[0];h6[2,3] += f46[3] signed* g02[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h6=q5,<f46=d23,<g02=d0
vmlal.s32 q5,d23,d0
# qhasm: h6[0,1] += f13_2[0] signed* g57[0];h6[2,3] += f13_2[1] signed* g57[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#7%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h6=q5,<f13_2=d12,<g57=d8
vmlal.s32 q5,d12,d8
# qhasm: new h9_stack
# qhasm: ptr = &h9_stack
# asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
# asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
add r2,sp,#672
# qhasm: mem128[ptr] aligned= h9
# asm 1: vst1.8 {<h9=reg128#9%bot-<h9=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h9=d16-<h9=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
# qhasm: h5[0,1] = f02[0] signed* g57[0];h5[2,3] = f02[1] signed* g57[1]
# asm 1: vmull.s32 >h5=reg128#5,<f02=reg128#10%bot,<g57=reg128#5%bot
# asm 2: vmull.s32 >h5=q4,<f02=d18,<g57=d8
vmull.s32 q4,d18,d8
# qhasm: h5[0,1] += f13[0] signed* g46[0];h5[2,3] += f13[1] signed* g46[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h5=q4,<f13=d26,<g46=d2
vmlal.s32 q4,d26,d2
# qhasm: h5[0,1] += f02[2] signed* g13[2];h5[2,3] += f02[3] signed* g13[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f02=reg128#10%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h5=q4,<f02=d19,<g13=d7
vmlal.s32 q4,d19,d7
# qhasm: h5[0,1] += f13[2] signed* g02[2];h5[2,3] += f13[3] signed* g02[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h5=q4,<f13=d27,<g02=d1
vmlal.s32 q4,d27,d1
# qhasm: h5[0,1] += f46[0] signed* g13[0];h5[2,3] += f46[1] signed* g13[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h5=q4,<f46=d22,<g13=d6
vmlal.s32 q4,d22,d6
# qhasm: h5[0,1] += f57[0] signed* g02[0];h5[2,3] += f57[1] signed* g02[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d28,<g02=d0
vmlal.s32 q4,d28,d0
# qhasm: h3[0,1] = f02[0] signed* g13[2];h3[2,3] = f02[1] signed* g13[3]
# asm 1: vmull.s32 >h3=reg128#9,<f02=reg128#10%bot,<g13=reg128#4%top
# asm 2: vmull.s32 >h3=q8,<f02=d18,<g13=d7
vmull.s32 q8,d18,d7
# qhasm: h3[0,1] += f13[0] signed* g02[2];h3[2,3] += f13[1] signed* g02[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h3=q8,<f13=d26,<g02=d1
vmlal.s32 q8,d26,d1
# qhasm: h3[0,1] += f02[2] signed* g13[0];h3[2,3] += f02[3] signed* g13[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f02=reg128#10%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h3=q8,<f02=d19,<g13=d6
vmlal.s32 q8,d19,d6
# qhasm: h3[0,1] += f13[2] signed* g02[0];h3[2,3] += f13[3] signed* g02[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h3=q8,<f13=d27,<g02=d0
vmlal.s32 q8,d27,d0
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: g89_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g89_19=reg128#11%bot->g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g89_19=d20->g89_19=d21},[<ptr=r2,: 128]
vld1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] += f89[0] signed* g89_19[2];h7[2,3] += f89[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h7=q7,<f89=d24,<g89_19=d21
vmlal.s32 q7,d24,d21
# qhasm: h7[0,1] += f89[2] signed* g89_19[0];h7[2,3] += f89[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h7=q7,<f89=d25,<g89_19=d20
vmlal.s32 q7,d25,d20
# qhasm: h5[0,1] += f46[2] signed* g89_19[2];h5[2,3] += f46[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h5=q4,<f46=d23,<g89_19=d21
vmlal.s32 q4,d23,d21
# qhasm: h5[0,1] += f57[2] signed* g89_19[0];h5[2,3] += f57[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d29,<g89_19=d20
vmlal.s32 q4,d29,d20
# qhasm: h3[0,1] += f46[0] signed* g89_19[2];h3[2,3] += f46[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h3=q8,<f46=d22,<g89_19=d21
vmlal.s32 q8,d22,d21
# qhasm: h3[0,1] += f57[0] signed* g89_19[0];h3[2,3] += f57[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h3=q8,<f57=d28,<g89_19=d20
vmlal.s32 q8,d28,d20
# qhasm: h6[0,1] += f89[0] signed* g89_19[0];h6[2,3] += f89[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f89=reg128#13%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h6=q5,<f89=d24,<g89_19=d20
vmlal.s32 q5,d24,d20
# qhasm: new h7_stack
# qhasm: ptr = &h7_stack
# asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
# asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= h7
# asm 1: vst1.8 {<h7=reg128#8%bot-<h7=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h7=d14-<h7=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h1[0,1] = f02[0] signed* g13[0];h1[2,3] = f02[1] signed* g13[1]
# asm 1: vmull.s32 >h1=reg128#8,<f02=reg128#10%bot,<g13=reg128#4%bot
# asm 2: vmull.s32 >h1=q7,<f02=d18,<g13=d6
vmull.s32 q7,d18,d6
# qhasm: h1[0,1] += f13[0] signed* g02[0];h1[2,3] += f13[1] signed* g02[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d26,<g02=d0
vmlal.s32 q7,d26,d0
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mix aligned= mem128[ptr]
# asm 1: vld1.8 {>mix=reg128#16%bot->mix=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>mix=d30->mix=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: h8[0,1] += mix[0] signed* g89_19[2];h8[2,3] += mix[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h8=reg128#3,<mix=reg128#16%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h8=q2,<mix=d30,<g89_19=d21
vmlal.s32 q2,d30,d21
# qhasm: h1[0,1] += f02[2] signed* g89_19[2];h1[2,3] += f02[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f02=reg128#10%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h1=q7,<f02=d19,<g89_19=d21
vmlal.s32 q7,d19,d21
# qhasm: h1[0,1] += f13[2] signed* g89_19[0];h1[2,3] += f13[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d27,<g89_19=d20
vmlal.s32 q7,d27,d20
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: g46_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g46_19=reg128#14%bot->g46_19=reg128#14%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g46_19=d26->g46_19=d27},[<ptr=r2,: 128]
vld1.8 {d26-d27},[r2,: 128]
# qhasm: h5[0,1] += f89[2] signed* g46_19[2];h5[2,3] += f89[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h5=q4,<f89=d25,<g46_19=d27
vmlal.s32 q4,d25,d27
# qhasm: h3[0,1] += f57[2] signed* g46_19[2];h3[2,3] += f57[3] signed* g46_19[3]
  3882. # asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%top,<g46_19=reg128#14%top
  3883. # asm 2: vmlal.s32 <h3=q8,<f57=d29,<g46_19=d27
  3884. vmlal.s32 q8,d29,d27
  3885. # qhasm: h3[0,1] += f89[2] signed* g46_19[0];h3[2,3] += f89[3] signed* g46_19[1]
  3886. # asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%top,<g46_19=reg128#14%bot
  3887. # asm 2: vmlal.s32 <h3=q8,<f89=d25,<g46_19=d26
  3888. vmlal.s32 q8,d25,d26
  3889. # qhasm: h1[0,1] += f57[0] signed* g46_19[2];h1[2,3] += f57[1] signed* g46_19[3]
  3890. # asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%bot,<g46_19=reg128#14%top
  3891. # asm 2: vmlal.s32 <h1=q7,<f57=d28,<g46_19=d27
  3892. vmlal.s32 q7,d28,d27
  3893. # qhasm: h1[0,1] += f57[2] signed* g46_19[0];h1[2,3] += f57[3] signed* g46_19[1]
  3894. # asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%top,<g46_19=reg128#14%bot
  3895. # asm 2: vmlal.s32 <h1=q7,<f57=d29,<g46_19=d26
  3896. vmlal.s32 q7,d29,d26
  3897. # qhasm: ptr = &g57_19_stack
  3898. # asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
  3899. # asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
  3900. add r2,sp,#608
  3901. # qhasm: g57_19 aligned= mem128[ptr]
  3902. # asm 1: vld1.8 {>g57_19=reg128#15%bot->g57_19=reg128#15%top},[<ptr=int32#3,: 128]
  3903. # asm 2: vld1.8 {>g57_19=d28->g57_19=d29},[<ptr=r2,: 128]
  3904. vld1.8 {d28-d29},[r2,: 128]
  3905. # qhasm: h5[0,1] += f89[0] signed* g57_19[2];h5[2,3] += f89[1] signed* g57_19[3]
  3906. # asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%bot,<g57_19=reg128#15%top
  3907. # asm 2: vmlal.s32 <h5=q4,<f89=d24,<g57_19=d29
  3908. vmlal.s32 q4,d24,d29
  3909. # qhasm: h3[0,1] += f46[2] signed* g57_19[2];h3[2,3] += f46[3] signed* g57_19[3]
  3910. # asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%top,<g57_19=reg128#15%top
  3911. # asm 2: vmlal.s32 <h3=q8,<f46=d23,<g57_19=d29
  3912. vmlal.s32 q8,d23,d29
  3913. # qhasm: h3[0,1] += f89[0] signed* g57_19[0];h3[2,3] += f89[1] signed* g57_19[1]
  3914. # asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%bot,<g57_19=reg128#15%bot
  3915. # asm 2: vmlal.s32 <h3=q8,<f89=d24,<g57_19=d28
  3916. vmlal.s32 q8,d24,d28
  3917. # qhasm: h1[0,1] += f46[0] signed* g57_19[2];h1[2,3] += f46[1] signed* g57_19[3]
  3918. # asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%bot,<g57_19=reg128#15%top
  3919. # asm 2: vmlal.s32 <h1=q7,<f46=d22,<g57_19=d29
  3920. vmlal.s32 q7,d22,d29
  3921. # qhasm: h1[0,1] += f46[2] signed* g57_19[0];h1[2,3] += f46[3] signed* g57_19[1]
  3922. # asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%top,<g57_19=reg128#15%bot
  3923. # asm 2: vmlal.s32 <h1=q7,<f46=d23,<g57_19=d28
  3924. vmlal.s32 q7,d23,d28
  3925. # qhasm: new h5_stack
  3926. # qhasm: ptr = &h5_stack
  3927. # asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
  3928. # asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
  3929. add r2,sp,#608
  3930. # qhasm: mem128[ptr] aligned= h5
  3931. # asm 1: vst1.8 {<h5=reg128#5%bot-<h5=reg128#5%top},[<ptr=int32#3,: 128]
  3932. # asm 2: vst1.8 {<h5=d8-<h5=d9},[<ptr=r2,: 128]
  3933. vst1.8 {d8-d9},[r2,: 128]
  3934. # qhasm: ptr = &g13_19_stack
  3935. # asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
  3936. # asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
  3937. add r2,sp,#560
  3938. # qhasm: g13_19 aligned= mem128[ptr]
  3939. # asm 1: vld1.8 {>g13_19=reg128#5%bot->g13_19=reg128#5%top},[<ptr=int32#3,: 128]
  3940. # asm 2: vld1.8 {>g13_19=d8->g13_19=d9},[<ptr=r2,: 128]
  3941. vld1.8 {d8-d9},[r2,: 128]
  3942. # qhasm: h1[0,1] += f89[0] signed* g13_19[2];h1[2,3] += f89[1] signed* g13_19[3]
  3943. # asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%bot,<g13_19=reg128#5%top
  3944. # asm 2: vmlal.s32 <h1=q7,<f89=d24,<g13_19=d9
  3945. vmlal.s32 q7,d24,d9
  3946. # qhasm: h1[0,1] += f89[2] signed* mix[2];h1[2,3] += f89[3] signed* mix[3]
  3947. # asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%top,<mix=reg128#16%top
  3948. # asm 2: vmlal.s32 <h1=q7,<f89=d25,<mix=d31
  3949. vmlal.s32 q7,d25,d31
  3950. # qhasm: h4[0,1] = f02[0] signed* g46[0];h4[2,3] = f02[1] signed* g46[1]
  3951. # asm 1: vmull.s32 >h4=reg128#2,<f02=reg128#10%bot,<g46=reg128#2%bot
  3952. # asm 2: vmull.s32 >h4=q1,<f02=d18,<g46=d2
  3953. vmull.s32 q1,d18,d2
  3954. # qhasm: h4[0,1] += f02[2] signed* g02[2];h4[2,3] += f02[3] signed* g02[3]
  3955. # asm 1: vmlal.s32 <h4=reg128#2,<f02=reg128#10%top,<g02=reg128#1%top
  3956. # asm 2: vmlal.s32 <h4=q1,<f02=d19,<g02=d1
  3957. vmlal.s32 q1,d19,d1
  3958. # qhasm: h4[0,1] += f46[0] signed* g02[0];h4[2,3] += f46[1] signed* g02[1]
  3959. # asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%bot,<g02=reg128#1%bot
  3960. # asm 2: vmlal.s32 <h4=q1,<f46=d22,<g02=d0
  3961. vmlal.s32 q1,d22,d0
  3962. # qhasm: h4[0,1] += f89[0] signed* g46_19[2];h4[2,3] += f89[1] signed* g46_19[3]
  3963. # asm 1: vmlal.s32 <h4=reg128#2,<f89=reg128#13%bot,<g46_19=reg128#14%top
  3964. # asm 2: vmlal.s32 <h4=q1,<f89=d24,<g46_19=d27
  3965. vmlal.s32 q1,d24,d27
  3966. # qhasm: h4[0,1] += f46[2] signed* g89_19[0];h4[2,3] += f46[3] signed* g89_19[1]
  3967. # asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%top,<g89_19=reg128#11%bot
  3968. # asm 2: vmlal.s32 <h4=q1,<f46=d23,<g89_19=d20
  3969. vmlal.s32 q1,d23,d20
  3970. # qhasm: h4[0,1] += f13_2[0] signed* g13[2];h4[2,3] += f13_2[1] signed* g13[3]
  3971. # asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%bot,<g13=reg128#4%top
  3972. # asm 2: vmlal.s32 <h4=q1,<f13_2=d12,<g13=d7
  3973. vmlal.s32 q1,d12,d7
  3974. # qhasm: h4[0,1] += f13_2[2] signed* g13[0];h4[2,3] += f13_2[3] signed* g13[1]
  3975. # asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%top,<g13=reg128#4%bot
  3976. # asm 2: vmlal.s32 <h4=q1,<f13_2=d13,<g13=d6
  3977. vmlal.s32 q1,d13,d6
  3978. # qhasm: h2[0,1] = f02[0] signed* g02[2];h2[2,3] = f02[1] signed* g02[3]
  3979. # asm 1: vmull.s32 >h2=reg128#7,<f02=reg128#10%bot,<g02=reg128#1%top
  3980. # asm 2: vmull.s32 >h2=q6,<f02=d18,<g02=d1
  3981. vmull.s32 q6,d18,d1
  3982. # qhasm: h2[0,1] += f02[2] signed* g02[0];h2[2,3] += f02[3] signed* g02[1]
  3983. # asm 1: vmlal.s32 <h2=reg128#7,<f02=reg128#10%top,<g02=reg128#1%bot
  3984. # asm 2: vmlal.s32 <h2=q6,<f02=d19,<g02=d0
  3985. vmlal.s32 q6,d19,d0
  3986. # qhasm: h2[0,1] += f46[2] signed* g46_19[2];h2[2,3] += f46[3] signed* g46_19[3]
  3987. # asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%top,<g46_19=reg128#14%top
  3988. # asm 2: vmlal.s32 <h2=q6,<f46=d23,<g46_19=d27
  3989. vmlal.s32 q6,d23,d27
  3990. # qhasm: h2[0,1] += f46[0] signed* g89_19[0];h2[2,3] += f46[1] signed* g89_19[1]
  3991. # asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%bot,<g89_19=reg128#11%bot
  3992. # asm 2: vmlal.s32 <h2=q6,<f46=d22,<g89_19=d20
  3993. vmlal.s32 q6,d22,d20
  3994. # qhasm: h2[0,1] += f89[0] signed* g46_19[0];h2[2,3] += f89[1] signed* g46_19[1]
  3995. # asm 1: vmlal.s32 <h2=reg128#7,<f89=reg128#13%bot,<g46_19=reg128#14%bot
  3996. # asm 2: vmlal.s32 <h2=q6,<f89=d24,<g46_19=d26
  3997. vmlal.s32 q6,d24,d26
  3998. # qhasm: h0[0,1] = f02[0] signed* g02[0];h0[2,3] = f02[1] signed* g02[1]
  3999. # asm 1: vmull.s32 >h0=reg128#1,<f02=reg128#10%bot,<g02=reg128#1%bot
  4000. # asm 2: vmull.s32 >h0=q0,<f02=d18,<g02=d0
  4001. vmull.s32 q0,d18,d0
  4002. # qhasm: h0[0,1] += f46[0] signed* g46_19[2];h0[2,3] += f46[1] signed* g46_19[3]
  4003. # asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%bot,<g46_19=reg128#14%top
  4004. # asm 2: vmlal.s32 <h0=q0,<f46=d22,<g46_19=d27
  4005. vmlal.s32 q0,d22,d27
  4006. # qhasm: h0[0,1] += f46[2] signed* g46_19[0];h0[2,3] += f46[3] signed* g46_19[1]
  4007. # asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%top,<g46_19=reg128#14%bot
  4008. # asm 2: vmlal.s32 <h0=q0,<f46=d23,<g46_19=d26
  4009. vmlal.s32 q0,d23,d26
  4010. # qhasm: h0[0,1] += f89[0] signed* mix[2];h0[2,3] += f89[1] signed* mix[3]
  4011. # asm 1: vmlal.s32 <h0=reg128#1,<f89=reg128#13%bot,<mix=reg128#16%top
  4012. # asm 2: vmlal.s32 <h0=q0,<f89=d24,<mix=d31
  4013. vmlal.s32 q0,d24,d31
  4014. # qhasm: h0[0,1] += f02[2] signed* g89_19[0];h0[2,3] += f02[3] signed* g89_19[1]
  4015. # asm 1: vmlal.s32 <h0=reg128#1,<f02=reg128#10%top,<g89_19=reg128#11%bot
  4016. # asm 2: vmlal.s32 <h0=q0,<f02=d19,<g89_19=d20
  4017. vmlal.s32 q0,d19,d20
  4018. # qhasm: ptr = &f57_2_stack
  4019. # asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
  4020. # asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
  4021. add r2,sp,#640
  4022. # qhasm: f57_2 aligned= mem128[ptr]
  4023. # asm 1: vld1.8 {>f57_2=reg128#10%bot->f57_2=reg128#10%top},[<ptr=int32#3,: 128]
  4024. # asm 2: vld1.8 {>f57_2=d18->f57_2=d19},[<ptr=r2,: 128]
  4025. vld1.8 {d18-d19},[r2,: 128]
  4026. # qhasm: h8[0,1] += f57_2[0] signed* g13[2];h8[2,3] += f57_2[1] signed* g13[3]
  4027. # asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%bot,<g13=reg128#4%top
  4028. # asm 2: vmlal.s32 <h8=q2,<f57_2=d18,<g13=d7
  4029. vmlal.s32 q2,d18,d7
  4030. # qhasm: h8[0,1] += f57_2[2] signed* g13[0];h8[2,3] += f57_2[3] signed* g13[1]
  4031. # asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%top,<g13=reg128#4%bot
  4032. # asm 2: vmlal.s32 <h8=q2,<f57_2=d19,<g13=d6
  4033. vmlal.s32 q2,d19,d6
  4034. # qhasm: h6[0,1] += f57_2[0] signed* g13[0];h6[2,3] += f57_2[1] signed* g13[1]
  4035. # asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%bot,<g13=reg128#4%bot
  4036. # asm 2: vmlal.s32 <h6=q5,<f57_2=d18,<g13=d6
  4037. vmlal.s32 q5,d18,d6
  4038. # qhasm: h6[0,1] += f57_2[2] signed* g89_19[2];h6[2,3] += f57_2[3] signed* g89_19[3]
  4039. # asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%top,<g89_19=reg128#11%top
  4040. # asm 2: vmlal.s32 <h6=q5,<f57_2=d19,<g89_19=d21
  4041. vmlal.s32 q5,d19,d21
  4042. # qhasm: h4[0,1] += f57_2[0] signed* g89_19[2];h4[2,3] += f57_2[1] signed* g89_19[3]
  4043. # asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%bot,<g89_19=reg128#11%top
  4044. # asm 2: vmlal.s32 <h4=q1,<f57_2=d18,<g89_19=d21
  4045. vmlal.s32 q1,d18,d21
  4046. # qhasm: h4[0,1] += f57_2[2] signed* g57_19[2];h4[2,3] += f57_2[3] signed* g57_19[3]
  4047. # asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%top,<g57_19=reg128#15%top
  4048. # asm 2: vmlal.s32 <h4=q1,<f57_2=d19,<g57_19=d29
  4049. vmlal.s32 q1,d19,d29
  4050. # qhasm: h0[0,1] += f57_2[0] signed* g57_19[0];h0[2,3] += f57_2[1] signed* g57_19[1]
  4051. # asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%bot,<g57_19=reg128#15%bot
  4052. # asm 2: vmlal.s32 <h0=q0,<f57_2=d18,<g57_19=d28
  4053. vmlal.s32 q0,d18,d28
  4054. # qhasm: h0[0,1] += f57_2[2] signed* g13_19[2];h0[2,3] += f57_2[3] signed* g13_19[3]
  4055. # asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%top,<g13_19=reg128#5%top
  4056. # asm 2: vmlal.s32 <h0=q0,<f57_2=d19,<g13_19=d9
  4057. vmlal.s32 q0,d19,d9
  4058. # qhasm: h2[0,1] += f57_2[0] signed* g57_19[2];h2[2,3] += f57_2[1] signed* g57_19[3]
  4059. # asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%bot,<g57_19=reg128#15%top
  4060. # asm 2: vmlal.s32 <h2=q6,<f57_2=d18,<g57_19=d29
  4061. vmlal.s32 q6,d18,d29
  4062. # qhasm: h2[0,1] += f57_2[2] signed* g57_19[0];h2[2,3] += f57_2[3] signed* g57_19[1]
  4063. # asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%top,<g57_19=reg128#15%bot
  4064. # asm 2: vmlal.s32 <h2=q6,<f57_2=d19,<g57_19=d28
  4065. vmlal.s32 q6,d19,d28
  4066. # qhasm: ptr = &f13_2_stack
  4067. # asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
  4068. # asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
  4069. add r2,sp,#592
  4070. # qhasm: f13_2 aligned= mem128[ptr]
  4071. # asm 1: vld1.8 {>f13_2=reg128#10%bot->f13_2=reg128#10%top},[<ptr=int32#3,: 128]
  4072. # asm 2: vld1.8 {>f13_2=d18->f13_2=d19},[<ptr=r2,: 128]
  4073. vld1.8 {d18-d19},[r2,: 128]
  4074. # qhasm: ptr = &_0x2000000_stack
  4075. # asm 1: lea >ptr=int32#3,<_0x2000000_stack=stack128#1
  4076. # asm 2: lea >ptr=r2,<_0x2000000_stack=[sp,#512]
  4077. add r2,sp,#512
  4078. # qhasm: _0x2000000 aligned= mem128[ptr]
  4079. # asm 1: vld1.8 {>_0x2000000=reg128#12%bot->_0x2000000=reg128#12%top},[<ptr=int32#3,: 128]
  4080. # asm 2: vld1.8 {>_0x2000000=d22->_0x2000000=d23},[<ptr=r2,: 128]
  4081. vld1.8 {d22-d23},[r2,: 128]
  4082. # qhasm: h6[0,1] += f13_2[2] signed* g13[2];h6[2,3] += f13_2[3] signed* g13[3]
  4083. # asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#10%top,<g13=reg128#4%top
  4084. # asm 2: vmlal.s32 <h6=q5,<f13_2=d19,<g13=d7
  4085. vmlal.s32 q5,d19,d7
  4086. # qhasm: h0[0,1] += f13_2[0] signed* g89_19[2];h0[2,3] += f13_2[1] signed* g89_19[3]
  4087. # asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%bot,<g89_19=reg128#11%top
  4088. # asm 2: vmlal.s32 <h0=q0,<f13_2=d18,<g89_19=d21
  4089. vmlal.s32 q0,d18,d21
  4090. # qhasm: h0[0,1] += f13_2[2] signed* g57_19[2];h0[2,3] += f13_2[3] signed* g57_19[3]
  4091. # asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%top,<g57_19=reg128#15%top
  4092. # asm 2: vmlal.s32 <h0=q0,<f13_2=d19,<g57_19=d29
  4093. vmlal.s32 q0,d19,d29
  4094. # qhasm: h2[0,1] += f13_2[0] signed* g13[0];h2[2,3] += f13_2[1] signed* g13[1]
  4095. # asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%bot,<g13=reg128#4%bot
  4096. # asm 2: vmlal.s32 <h2=q6,<f13_2=d18,<g13=d6
  4097. vmlal.s32 q6,d18,d6
  4098. # qhasm: ptr = &_0x1000000_stack
  4099. # asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
  4100. # asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
  4101. add r2,sp,#528
  4102. # qhasm: _0x1000000 aligned= mem128[ptr]
  4103. # asm 1: vld1.8 {>_0x1000000=reg128#4%bot->_0x1000000=reg128#4%top},[<ptr=int32#3,: 128]
  4104. # asm 2: vld1.8 {>_0x1000000=d6->_0x1000000=d7},[<ptr=r2,: 128]
  4105. vld1.8 {d6-d7},[r2,: 128]
  4106. # qhasm: h2[0,1] += f13_2[2] signed* g89_19[2];h2[2,3] += f13_2[3] signed* g89_19[3]
  4107. # asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%top,<g89_19=reg128#11%top
  4108. # asm 2: vmlal.s32 <h2=q6,<f13_2=d19,<g89_19=d21
  4109. vmlal.s32 q6,d19,d21
  4110. # qhasm: ptr = &h7_stack
  4111. # asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
  4112. # asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
  4113. add r2,sp,#576
  4114. # qhasm: h7 aligned= mem128[ptr]
  4115. # asm 1: vld1.8 {>h7=reg128#10%bot->h7=reg128#10%top},[<ptr=int32#3,: 128]
  4116. # asm 2: vld1.8 {>h7=d18->h7=d19},[<ptr=r2,: 128]
  4117. vld1.8 {d18-d19},[r2,: 128]
  4118. # qhasm: h0[0,1] += mix[0] signed* g13_19[0];h0[2,3] += mix[1] signed* g13_19[1]
  4119. # asm 1: vmlal.s32 <h0=reg128#1,<mix=reg128#16%bot,<g13_19=reg128#5%bot
  4120. # asm 2: vmlal.s32 <h0=q0,<mix=d30,<g13_19=d8
  4121. vmlal.s32 q0,d30,d8
  4122. # qhasm: ptr = &h9_stack
  4123. # asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
  4124. # asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
  4125. add r2,sp,#672
  4126. # qhasm: h9 aligned= mem128[ptr]
  4127. # asm 1: vld1.8 {>h9=reg128#11%bot->h9=reg128#11%top},[<ptr=int32#3,: 128]
  4128. # asm 2: vld1.8 {>h9=d20->h9=d21},[<ptr=r2,: 128]
  4129. vld1.8 {d20-d21},[r2,: 128]
  4130. # qhasm: h6[0,1] += mix[0] signed* g57_19[2];h6[2,3] += mix[1] signed* g57_19[3]
  4131. # asm 1: vmlal.s32 <h6=reg128#6,<mix=reg128#16%bot,<g57_19=reg128#15%top
  4132. # asm 2: vmlal.s32 <h6=q5,<mix=d30,<g57_19=d29
  4133. vmlal.s32 q5,d30,d29
  4134. # qhasm: ptr = &h5_stack
  4135. # asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
  4136. # asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
  4137. add r2,sp,#608
  4138. # qhasm: h5 aligned= mem128[ptr]
  4139. # asm 1: vld1.8 {>h5=reg128#13%bot->h5=reg128#13%top},[<ptr=int32#3,: 128]
  4140. # asm 2: vld1.8 {>h5=d24->h5=d25},[<ptr=r2,: 128]
  4141. vld1.8 {d24-d25},[r2,: 128]
  4142. # qhasm: h4[0,1] += mix[0] signed* g57_19[0];h4[2,3] += mix[1] signed* g57_19[1]
  4143. # asm 1: vmlal.s32 <h4=reg128#2,<mix=reg128#16%bot,<g57_19=reg128#15%bot
  4144. # asm 2: vmlal.s32 <h4=q1,<mix=d30,<g57_19=d28
  4145. vmlal.s32 q1,d30,d28
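# Note (assumption, not from the original source): the vadd/vshr/vshl/vsub
# sequence interleaved below appears to be the usual carry chain for the
# 10-limb radix-2^25.5 representation of GF(2^255-19): t_i = h_i + 2^25
# (or 2^24) is shifted arithmetically right by 26 (or 25) to extract a
# carry c_i, c_i is added into the next limb, and c_i << 26 (or << 25) is
# subtracted back out of h_i. The carry out of h9 is folded into h0
# multiplied by 19, assembled further down as c9 + c9, plus c9 << 4,
# plus c9 (2 + 16 + 1 = 19).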
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#14,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q13,<h0=q0,<_0x2000000=q11
vadd.i64 q13,q0,q11
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#15,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q14,<h6=q5,<_0x2000000=q11
vadd.i64 q14,q5,q11
# qhasm: h2[0,1] += mix[0] signed* g13_19[2];h2[2,3] += mix[1] signed* g13_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<mix=reg128#16%bot,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h2=q6,<mix=d30,<g13_19=d9
vmlal.s32 q6,d30,d9
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#5,<t0=reg128#14,#26
# asm 2: vshr.s64 >c0=q4,<t0=q13,#26
vshr.s64 q4,q13,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#14,<t6=reg128#15,#26
# asm 2: vshr.s64 >c6=q13,<t6=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#8,<h1=reg128#8,<c0=reg128#5
# asm 2: vadd.i64 >h1=q7,<h1=q7,<c0=q4
vadd.i64 q7,q7,q4
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#5,#26
# asm 2: vshl.i64 >t0=q4,<c0=q4,#26
vshl.i64 q4,q4,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#15,<h1=reg128#8,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t1=q14,<h1=q7,<_0x1000000=q3
vadd.i64 q14,q7,q3
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#10,<h7=reg128#10,<c6=reg128#14
# asm 2: vadd.i64 >h7=q9,<h7=q9,<c6=q13
vadd.i64 q9,q9,q13
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#14,<c6=reg128#14,#26
# asm 2: vshl.i64 >t6=q13,<c6=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#16,<h7=reg128#10,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t7=q15,<h7=q9,<_0x1000000=q3
vadd.i64 q15,q9,q3
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#5,<t1=reg128#15,#25
# asm 2: vshr.s64 >c1=q4,<t1=q14,#25
vshr.s64 q4,q14,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#14
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q13
vsub.i64 q5,q5,q13
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#14,<t7=reg128#16,#25
# asm 2: vshr.s64 >c7=q13,<t7=q15,#25
vshr.s64 q13,q15,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#7,<h2=reg128#7,<c1=reg128#5
# asm 2: vadd.i64 >h2=q6,<h2=q6,<c1=q4
vadd.i64 q6,q6,q4
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#5,<c1=reg128#5,#25
# asm 2: vshl.i64 >t1=q4,<c1=q4,#25
vshl.i64 q4,q4,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#15,<h2=reg128#7,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t2=q14,<h2=q6,<_0x2000000=q11
vadd.i64 q14,q6,q11
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#3,<h8=reg128#3,<c7=reg128#14
# asm 2: vadd.i64 >h8=q2,<h8=q2,<c7=q13
vadd.i64 q2,q2,q13
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#5,<h1=reg128#8,<t1=reg128#5
# asm 2: vsub.i64 >h1=q4,<h1=q7,<t1=q4
vsub.i64 q4,q7,q4
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#8,<t2=reg128#15,#26
# asm 2: vshr.s64 >c2=q7,<t2=q14,#26
vshr.s64 q7,q14,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#14,<c7=reg128#14,#25
# asm 2: vshl.i64 >t7=q13,<c7=q13,#25
vshl.i64 q13,q13,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#15,<h8=reg128#3,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t8=q14,<h8=q2,<_0x2000000=q11
vadd.i64 q14,q2,q11
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#9,<h3=reg128#9,<c2=reg128#8
# asm 2: vadd.i64 >h3=q8,<h3=q8,<c2=q7
vadd.i64 q8,q8,q7
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#8,<c2=reg128#8,#26
# asm 2: vshl.i64 >t2=q7,<c2=q7,#26
vshl.i64 q7,q7,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#9,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t3=q15,<h3=q8,<_0x1000000=q3
vadd.i64 q15,q8,q3
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#10,<h7=reg128#10,<t7=reg128#14
# asm 2: vsub.i64 >h7=q9,<h7=q9,<t7=q13
vsub.i64 q9,q9,q13
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#14,<t8=reg128#15,#26
# asm 2: vshr.s64 >c8=q13,<t8=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#7,<h2=reg128#7,<t2=reg128#8
# asm 2: vsub.i64 >h2=q6,<h2=q6,<t2=q7
vsub.i64 q6,q6,q7
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#8,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q7,<t3=q15,#25
vshr.s64 q7,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#11,<h9=reg128#11,<c8=reg128#14
# asm 2: vadd.i64 >h9=q10,<h9=q10,<c8=q13
vadd.i64 q10,q10,q13
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#14,<c8=reg128#14,#26
# asm 2: vshl.i64 >t8=q13,<c8=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#15,<h9=reg128#11,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t9=q14,<h9=q10,<_0x1000000=q3
vadd.i64 q14,q10,q3
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#2,<h4=reg128#2,<c3=reg128#8
# asm 2: vadd.i64 >h4=q1,<h4=q1,<c3=q7
vadd.i64 q1,q1,q7
# qhasm: posh = playground1_ptr + 288
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#288
# asm 2: add >posh=r2,<playground1_ptr=r3,#288
add r2,r3,#288
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#8,<c3=reg128#8,#25
# asm 2: vshl.i64 >t3=q7,<c3=q7,#25
vshl.i64 q7,q7,#25
# qhasm: posH = playground1_ptr + 96
# asm 1: add >posH=int32#5,<playground1_ptr=int32#4,#96
# asm 2: add >posH=r4,<playground1_ptr=r3,#96
add r4,r3,#96
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#2,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t4=q15,<h4=q1,<_0x2000000=q11
vadd.i64 q15,q1,q11
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#3,<h8=reg128#3,<t8=reg128#14
# asm 2: vsub.i64 >h8=q2,<h8=q2,<t8=q13
vsub.i64 q2,q2,q13
# qhasm: posH+=8
# asm 1: add >posH=int32#5,<posH=int32#5,#8
# asm 2: add >posH=r4,<posH=r4,#8
add r4,r4,#8
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#14,<t9=reg128#15,#25
# asm 2: vshr.s64 >c9=q13,<t9=q14,#25
vshr.s64 q13,q14,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#8,<h3=reg128#9,<t3=reg128#8
# asm 2: vsub.i64 >h3=q7,<h3=q8,<t3=q7
vsub.i64 q7,q8,q7
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#9,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q8,<t4=q15,#26
vshr.s64 q8,q15,#26
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#15,<c9=reg128#14,<c9=reg128#14
# asm 2: vadd.i64 >s=q14,<c9=q13,<c9=q13
vadd.i64 q14,q13,q13
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#13,<h5=reg128#13,<c4=reg128#9
# asm 2: vadd.i64 >h5=q12,<h5=q12,<c4=q8
vadd.i64 q12,q12,q8
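# Note (assumption, not from the original source): the vtrn.32/vst1.8
# steps interleaved below seem to de-interleave the two field elements
# processed in parallel by this block: lane-0 limbs are stored through
# posh (playground1_ptr + 288) and lane-1 limbs through posH
# (playground1_ptr + 96), while the remaining carries are still being
# propagated.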
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%bot,<h3=reg128#8%bot
# asm 2: vtrn.32 <h2=d12,<h3=d14
vtrn.32 d12,d14
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#9,<c4=reg128#9,#26
# asm 2: vshl.i64 >t4=q8,<c4=q8,#26
vshl.i64 q8,q8,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%top,<h3=reg128#8%top
# asm 2: vtrn.32 <h2=d13,<h3=d15
vtrn.32 d13,d15
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#4,<h5=reg128#13,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t5=q3,<h5=q12,<_0x1000000=q3
vadd.i64 q3,q12,q3
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#15
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q14
vadd.i64 q0,q0,q14
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#7%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d12,[<posh=r2,: 64]!
vst1.8 d12,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#8,<c9=reg128#14,#4
# asm 2: vshl.i64 >s=q7,<c9=q13,#4
vshl.i64 q7,q13,#4
# qhasm: mem64[posH] aligned= h2[1];posH+=8
# asm 1: vst1.8 <h2=reg128#7%top,[<posH=int32#5,: 64]!
# asm 2: vst1.8 <h2=d13,[<posH=r4,: 64]!
vst1.8 d13,[r4,: 64]!
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#2,<h4=reg128#2,<t4=reg128#9
# asm 2: vsub.i64 >h4=q1,<h4=q1,<t4=q8
vsub.i64 q1,q1,q8
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#4,<t5=reg128#4,#25
# asm 2: vshr.s64 >c5=q3,<t5=q3,#25
vshr.s64 q3,q3,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#8
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q7
vadd.i64 q0,q0,q7
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#6,<h6=reg128#6,<c5=reg128#4
# asm 2: vadd.i64 >h6=q5,<h6=q5,<c5=q3
vadd.i64 q5,q5,q3
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#4,<c5=reg128#4,#25
# asm 2: vshl.i64 >t5=q3,<c5=q3,#25
vshl.i64 q3,q3,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q6,<h6=q5,<_0x2000000=q11
vadd.i64 q6,q5,q11
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<c9=reg128#14
# asm 2: vadd.i64 >h0=q0,<h0=q0,<c9=q13
vadd.i64 q0,q0,q13
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#8,<c9=reg128#14,#25
# asm 2: vshl.i64 >t9=q7,<c9=q13,#25
vshl.i64 q7,q13,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#9,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q8,<h0=q0,<_0x2000000=q11
vadd.i64 q8,q0,q11
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#4,<h5=reg128#13,<t5=reg128#4
# asm 2: vsub.i64 >h5=q3,<h5=q12,<t5=q3
vsub.i64 q3,q12,q3
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#8,<h9=reg128#11,<t9=reg128#8
# asm 2: vsub.i64 >h9=q7,<h9=q10,<t9=q7
vsub.i64 q7,q10,q7
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%bot,<h5=reg128#4%bot
# asm 2: vtrn.32 <h4=d2,<h5=d6
vtrn.32 d2,d6
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#9,<t0=reg128#9,#26
# asm 2: vshr.s64 >c0=q8,<t0=q8,#26
vshr.s64 q8,q8,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%top,<h5=reg128#4%top
# asm 2: vtrn.32 <h4=d3,<h5=d7
vtrn.32 d3,d7
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#4,<h7=reg128#10,<c6=reg128#7
# asm 2: vadd.i64 >h7=q3,<h7=q9,<c6=q6
vadd.i64 q3,q9,q6
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#2%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d2,[<posh=r2,: 64]
vst1.8 d2,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#7,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q6,<c6=q6,#26
vshl.i64 q6,q6,#26
# qhasm: mem64[posH] aligned= h4[1]
# asm 1: vst1.8 <h4=reg128#2%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h4=d3,[<posH=r4,: 64]
vst1.8 d3,[r4,: 64]
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#2,<h1=reg128#5,<c0=reg128#9
# asm 2: vadd.i64 >h1=q1,<h1=q4,<c0=q8
vadd.i64 q1,q4,q8
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%bot,<h9=reg128#8%bot
# asm 2: vtrn.32 <h8=d4,<h9=d14
vtrn.32 d4,d14
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#9,#26
# asm 2: vshl.i64 >t0=q4,<c0=q8,#26
vshl.i64 q4,q8,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%top,<h9=reg128#8%top
# asm 2: vtrn.32 <h8=d5,<h9=d15
vtrn.32 d5,d15
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#7
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q6
vsub.i64 q5,q5,q6
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: posH+=16
# asm 1: add >posH=int32#5,<posH=int32#5,#16
# asm 2: add >posH=r4,<posH=r4,#16
add r4,r4,#16
# qhasm: mem64[posH] aligned= h8[1]
# asm 1: vst1.8 <h8=reg128#3%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h8=d5,[<posH=r4,: 64]
vst1.8 d5,[r4,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%bot,<h7=reg128#4%bot
# asm 2: vtrn.32 <h6=d10,<h7=d6
vtrn.32 d10,d6
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%top,<h7=reg128#4%top
# asm 2: vtrn.32 <h6=d11,<h7=d7
vtrn.32 d11,d7
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: posH-=8
# asm 1: sub >posH=int32#5,<posH=int32#5,#8
# asm 2: sub >posH=r4,<posH=r4,#8
sub r4,r4,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%bot,<h1=reg128#2%bot
# asm 2: vtrn.32 <h0=d0,<h1=d2
vtrn.32 d0,d2
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%top,<h1=reg128#2%top
# asm 2: vtrn.32 <h0=d1,<h1=d3
vtrn.32 d1,d3
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#6%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d10,[<posh=r2,: 64]
vst1.8 d10,[r2,: 64]
# qhasm: mem64[posH] aligned= h6[1]
# asm 1: vst1.8 <h6=reg128#6%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h6=d11,[<posH=r4,: 64]
vst1.8 d11,[r4,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: posH-=24
# asm 1: sub >posH=int32#5,<posH=int32#5,#24
# asm 2: sub >posH=r4,<posH=r4,#24
sub r4,r4,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: mem64[posH] aligned= h0[1]
# asm 1: vst1.8 <h0=reg128#1%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h0=d1,[<posH=r4,: 64]
vst1.8 d1,[r4,: 64]
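# Note (assumption, not from the original source): the block below appears
# to begin a doubled squaring. Limbs of two inputs are loaded from
# posf/posF, vzip'd so each vector pairs matching limbs of the two
# elements, then doubled copies (fg*_2) and copies scaled by 19 or 38
# (via the _19_19_38_38 constant) are formed, the standard precomputation
# for squaring in the 10-limb representation of GF(2^255-19).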
# qhasm: ptr = &_19_19_38_38_stack
# asm 1: lea >ptr=int32#3,<_19_19_38_38_stack=stack128#3
# asm 2: lea >ptr=r2,<_19_19_38_38_stack=[sp,#544]
add r2,sp,#544
# qhasm: posf = playground1_ptr + 144
# asm 1: add >posf=int32#5,<playground1_ptr=int32#4,#144
# asm 2: add >posf=r4,<playground1_ptr=r3,#144
add r4,r3,#144
# qhasm: posF = playground1_ptr + 192
# asm 1: add >posF=int32#6,<playground1_ptr=int32#4,#192
# asm 2: add >posF=r5,<playground1_ptr=r3,#192
add r5,r3,#192
# qhasm: _19_19_38_38 aligned= mem128[ptr]
# asm 1: vld1.8 {>_19_19_38_38=reg128#1%bot->_19_19_38_38=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_19_19_38_38=d0->_19_19_38_38=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: fg01 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>fg01=reg128#2%bot->fg01=reg128#2%top},[<posf=int32#5,: 128]!
# asm 2: vld1.8 {>fg01=d2->fg01=d3},[<posf=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: fg23 aligned= mem128[posF];posF+=16
# asm 1: vld1.8 {>fg23=reg128#3%bot->fg23=reg128#3%top},[<posF=int32#6,: 128]!
# asm 2: vld1.8 {>fg23=d4->fg23=d5},[<posF=r5,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
# qhasm: fg01[0,1,2,3] fg23[0,1,2,3] = fg01[0]fg23[0]fg01[1]fg23[1] fg01[2]fg23[2]fg01[3]fg23[3]
# asm 1: vzip.i32 <fg01=reg128#2,<fg23=reg128#3
# asm 2: vzip.i32 <fg01=q1,<fg23=q2
vzip.i32 q1,q2
# qhasm: fg45 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>fg45=reg128#4%bot->fg45=reg128#4%top},[<posf=int32#5,: 128]!
# asm 2: vld1.8 {>fg45=d6->fg45=d7},[<posf=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: fg67 aligned= mem128[posF];posF+=16
# asm 1: vld1.8 {>fg67=reg128#5%bot->fg67=reg128#5%top},[<posF=int32#6,: 128]!
# asm 2: vld1.8 {>fg67=d8->fg67=d9},[<posF=r5,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
# qhasm: 4x fg01_2 = fg01 << 1
# asm 1: vshl.i32 >fg01_2=reg128#6,<fg01=reg128#2,#1
# asm 2: vshl.i32 >fg01_2=q5,<fg01=q1,#1
vshl.i32 q5,q1,#1
# qhasm: fg45[0,1,2,3] fg67[0,1,2,3] = fg45[0]fg67[0]fg45[1]fg67[1] fg45[2]fg67[2]fg45[3]fg67[3]
# asm 1: vzip.i32 <fg45=reg128#4,<fg67=reg128#5
# asm 2: vzip.i32 <fg45=q3,<fg67=q4
vzip.i32 q3,q4
# qhasm: 4x fg23_2 = fg23 << 1
# asm 1: vshl.i32 >fg23_2=reg128#7,<fg23=reg128#3,#1
# asm 2: vshl.i32 >fg23_2=q6,<fg23=q2,#1
vshl.i32 q6,q2,#1
# qhasm: new fg89
# qhasm: fg89 aligned= mem64[posf]fg89[1]
# asm 1: vld1.8 {<fg89=reg128#8%bot},[<posf=int32#5,: 64]
# asm 2: vld1.8 {<fg89=d14},[<posf=r4,: 64]
vld1.8 {d14},[r4,: 64]
# qhasm: 4x fg45_2 = fg45 << 1
# asm 1: vshl.i32 >fg45_2=reg128#9,<fg45=reg128#4,#1
# asm 2: vshl.i32 >fg45_2=q8,<fg45=q3,#1
vshl.i32 q8,q3,#1
# qhasm: fg89 aligned= fg89[0]mem64[posF]
# asm 1: vld1.8 {<fg89=reg128#8%top},[<posF=int32#6,: 64]
# asm 2: vld1.8 {<fg89=d15},[<posF=r5,: 64]
vld1.8 {d15},[r5,: 64]
# qhasm: 4x fg67_2 = fg67 << 1
# asm 1: vshl.i32 >fg67_2=reg128#10,<fg67=reg128#5,#1
# asm 2: vshl.i32 >fg67_2=q9,<fg67=q4,#1
vshl.i32 q9,q4,#1
# qhasm: fg45_19_38[0,1] = fg45_19_38[0,1];fg45_19_38[2] = fg45[2] * _19_19_38_38[2];fg45_19_38[3] = fg45[3] * _19_19_38_38[3]
# asm 1: vmul.i32 >fg45_19_38=reg128#11%top,<fg45=reg128#4%top,<_19_19_38_38=reg128#1%top
# asm 2: vmul.i32 >fg45_19_38=d21,<fg45=d7,<_19_19_38_38=d1
vmul.i32 d21,d7,d1
# qhasm: fg89 = fg89[0,2,1,3]
# asm 1: vtrn.32 <fg89=reg128#8%bot,<fg89=reg128#8%top
# asm 2: vtrn.32 <fg89=d14,<fg89=d15
vtrn.32 d14,d15
# qhasm: 4x fg67_19_38 = fg67 * _19_19_38_38
# asm 1: vmul.i32 >fg67_19_38=reg128#12,<fg67=reg128#5,<_19_19_38_38=reg128#1
# asm 2: vmul.i32 >fg67_19_38=q11,<fg67=q4,<_19_19_38_38=q0
vmul.i32 q11,q4,q0
# qhasm: 4x fg89_19_38 = fg89 * _19_19_38_38
# asm 1: vmul.i32 >fg89_19_38=reg128#1,<fg89=reg128#8,<_19_19_38_38=reg128#1
# asm 2: vmul.i32 >fg89_19_38=q0,<fg89=q7,<_19_19_38_38=q0
vmul.i32 q0,q7,q0
# qhasm: h0[0,1] = fg01[0] signed* fg01[0];h0[2,3] = fg01[1] signed* fg01[1]
# asm 1: vmull.s32 >h0=reg128#13,<fg01=reg128#2%bot,<fg01=reg128#2%bot
# asm 2: vmull.s32 >h0=q12,<fg01=d2,<fg01=d2
vmull.s32 q12,d2,d2
# qhasm: h0[0,1] += fg01_2[2] signed* fg89_19_38[2];h0[2,3] += fg01_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg01_2=reg128#6%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h0=q12,<fg01_2=d11,<fg89_19_38=d1
vmlal.s32 q12,d11,d1
# qhasm: h0[0,1] += fg23_2[0] signed* fg89_19_38[0];h0[2,3] += fg23_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h0=reg128#13,<fg23_2=reg128#7%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h0=q12,<fg23_2=d12,<fg89_19_38=d0
vmlal.s32 q12,d12,d0
# qhasm: h0[0,1] += fg23_2[2] signed* fg67_19_38[2];h0[2,3] += fg23_2[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg23_2=reg128#7%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h0=q12,<fg23_2=d13,<fg67_19_38=d23
vmlal.s32 q12,d13,d23
# qhasm: h0[0,1] += fg45_2[0] signed* fg67_19_38[0];h0[2,3] += fg45_2[1] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h0=reg128#13,<fg45_2=reg128#9%bot,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h0=q12,<fg45_2=d16,<fg67_19_38=d22
vmlal.s32 q12,d16,d22
# qhasm: h0[0,1] += fg45[2] signed* fg45_19_38[2];h0[2,3] += fg45[3] signed* fg45_19_38[3]
# asm 1: vmlal.s32 <h0=reg128#13,<fg45=reg128#4%top,<fg45_19_38=reg128#11%top
# asm 2: vmlal.s32 <h0=q12,<fg45=d7,<fg45_19_38=d21
vmlal.s32 q12,d7,d21
# qhasm: h1[0,1] = fg01[0] signed* fg01_2[2];h1[2,3] = fg01[1] signed* fg01_2[3]
# asm 1: vmull.s32 >h1=reg128#11,<fg01=reg128#2%bot,<fg01_2=reg128#6%top
# asm 2: vmull.s32 >h1=q10,<fg01=d2,<fg01_2=d11
vmull.s32 q10,d2,d11
# qhasm: h1[0,1] += fg23[0] signed* fg89_19_38[2];h1[2,3] += fg23[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h1=reg128#11,<fg23=reg128#3%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h1=q10,<fg23=d4,<fg89_19_38=d1
vmlal.s32 q10,d4,d1
# qhasm: h1[0,1] += fg23_2[2] signed* fg89_19_38[0];h1[2,3] += fg23_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h1=reg128#11,<fg23_2=reg128#7%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h1=q10,<fg23_2=d13,<fg89_19_38=d0
vmlal.s32 q10,d13,d0
# qhasm: h1[0,1] += fg45[0] signed* fg67_19_38[2];h1[2,3] += fg45[1] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h1=reg128#11,<fg45=reg128#4%bot,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h1=q10,<fg45=d6,<fg67_19_38=d23
vmlal.s32 q10,d6,d23
# qhasm: h1[0,1] += fg45_2[2] signed* fg67_19_38[0];h1[2,3] += fg45_2[3] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h1=reg128#11,<fg45_2=reg128#9%top,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h1=q10,<fg45_2=d17,<fg67_19_38=d22
vmlal.s32 q10,d17,d22
# qhasm: h2[0,1] = fg01_2[0] signed* fg23[0];h2[2,3] = fg01_2[1] signed* fg23[1]
# asm 1: vmull.s32 >h2=reg128#14,<fg01_2=reg128#6%bot,<fg23=reg128#3%bot
# asm 2: vmull.s32 >h2=q13,<fg01_2=d10,<fg23=d4
vmull.s32 q13,d10,d4
# qhasm: h2[0,1] += fg01_2[2] signed* fg01[2];h2[2,3] += fg01_2[3] signed* fg01[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg01_2=reg128#6%top,<fg01=reg128#2%top
# asm 2: vmlal.s32 <h2=q13,<fg01_2=d11,<fg01=d3
vmlal.s32 q13,d11,d3
# qhasm: h2[0,1] += fg23_2[2] signed* fg89_19_38[2];h2[2,3] += fg23_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg23_2=reg128#7%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h2=q13,<fg23_2=d13,<fg89_19_38=d1
vmlal.s32 q13,d13,d1
# qhasm: h2[0,1] += fg45_2[0] signed* fg89_19_38[0];h2[2,3] += fg45_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h2=reg128#14,<fg45_2=reg128#9%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h2=q13,<fg45_2=d16,<fg89_19_38=d0
vmlal.s32 q13,d16,d0
# qhasm: h2[0,1] += fg45_2[2] signed* fg67_19_38[2];h2[2,3] += fg45_2[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h2=reg128#14,<fg45_2=reg128#9%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h2=q13,<fg45_2=d17,<fg67_19_38=d23
vmlal.s32 q13,d17,d23
# qhasm: h2[0,1] += fg67[0] signed* fg67_19_38[0];h2[2,3] += fg67[1] signed* fg67_19_38[1]
# asm 1: vmlal.s32 <h2=reg128#14,<fg67=reg128#5%bot,<fg67_19_38=reg128#12%bot
# asm 2: vmlal.s32 <h2=q13,<fg67=d8,<fg67_19_38=d22
vmlal.s32 q13,d8,d22
# qhasm: h3[0,1] = fg01_2[0] signed* fg23[2];h3[2,3] = fg01_2[1] signed* fg23[3]
# asm 1: vmull.s32 >h3=reg128#2,<fg01_2=reg128#6%bot,<fg23=reg128#3%top
# asm 2: vmull.s32 >h3=q1,<fg01_2=d10,<fg23=d5
vmull.s32 q1,d10,d5
# qhasm: h3[0,1] += fg01_2[2] signed* fg23[0];h3[2,3] += fg01_2[3] signed* fg23[1]
# asm 1: vmlal.s32 <h3=reg128#2,<fg01_2=reg128#6%top,<fg23=reg128#3%bot
# asm 2: vmlal.s32 <h3=q1,<fg01_2=d11,<fg23=d4
vmlal.s32 q1,d11,d4
# qhasm: h3[0,1] += fg45[0] signed* fg89_19_38[2];h3[2,3] += fg45[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h3=reg128#2,<fg45=reg128#4%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h3=q1,<fg45=d6,<fg89_19_38=d1
vmlal.s32 q1,d6,d1
# qhasm: h3[0,1] += fg45_2[2] signed* fg89_19_38[0];h3[2,3] += fg45_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h3=reg128#2,<fg45_2=reg128#9%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h3=q1,<fg45_2=d17,<fg89_19_38=d0
vmlal.s32 q1,d17,d0
# qhasm: h3[0,1] += fg67[0] signed* fg67_19_38[2];h3[2,3] += fg67[1] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h3=reg128#2,<fg67=reg128#5%bot,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h3=q1,<fg67=d8,<fg67_19_38=d23
vmlal.s32 q1,d8,d23
# qhasm: h4[0,1] = fg01_2[0] signed* fg45[0];h4[2,3] = fg01_2[1] signed* fg45[1]
# asm 1: vmull.s32 >h4=reg128#15,<fg01_2=reg128#6%bot,<fg45=reg128#4%bot
# asm 2: vmull.s32 >h4=q14,<fg01_2=d10,<fg45=d6
vmull.s32 q14,d10,d6
# qhasm: h4[0,1] += fg01_2[2] signed* fg23_2[2];h4[2,3] += fg01_2[3] signed* fg23_2[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg01_2=reg128#6%top,<fg23_2=reg128#7%top
# asm 2: vmlal.s32 <h4=q14,<fg01_2=d11,<fg23_2=d13
vmlal.s32 q14,d11,d13
# qhasm: h4[0,1] += fg23[0] signed* fg23[0];h4[2,3] += fg23[1] signed* fg23[1]
# asm 1: vmlal.s32 <h4=reg128#15,<fg23=reg128#3%bot,<fg23=reg128#3%bot
# asm 2: vmlal.s32 <h4=q14,<fg23=d4,<fg23=d4
vmlal.s32 q14,d4,d4
# qhasm: h4[0,1] += fg45_2[2] signed* fg89_19_38[2];h4[2,3] += fg45_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg45_2=reg128#9%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h4=q14,<fg45_2=d17,<fg89_19_38=d1
vmlal.s32 q14,d17,d1
# qhasm: h4[0,1] += fg67_2[0] signed* fg89_19_38[0];h4[2,3] += fg67_2[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h4=reg128#15,<fg67_2=reg128#10%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h4=q14,<fg67_2=d18,<fg89_19_38=d0
vmlal.s32 q14,d18,d0
# qhasm: h4[0,1] += fg67[2] signed* fg67_19_38[2];h4[2,3] += fg67[3] signed* fg67_19_38[3]
# asm 1: vmlal.s32 <h4=reg128#15,<fg67=reg128#5%top,<fg67_19_38=reg128#12%top
# asm 2: vmlal.s32 <h4=q14,<fg67=d9,<fg67_19_38=d23
vmlal.s32 q14,d9,d23
# qhasm: h5[0,1] = fg01_2[0] signed* fg45[2];h5[2,3] = fg01_2[1] signed* fg45[3]
# asm 1: vmull.s32 >h5=reg128#12,<fg01_2=reg128#6%bot,<fg45=reg128#4%top
# asm 2: vmull.s32 >h5=q11,<fg01_2=d10,<fg45=d7
vmull.s32 q11,d10,d7
# qhasm: h5[0,1] += fg01_2[2] signed* fg45[0];h5[2,3] += fg01_2[3] signed* fg45[1]
# asm 1: vmlal.s32 <h5=reg128#12,<fg01_2=reg128#6%top,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h5=q11,<fg01_2=d11,<fg45=d6
vmlal.s32 q11,d11,d6
# qhasm: h5[0,1] += fg23_2[0] signed* fg23[2];h5[2,3] += fg23_2[1] signed* fg23[3]
# asm 1: vmlal.s32 <h5=reg128#12,<fg23_2=reg128#7%bot,<fg23=reg128#3%top
# asm 2: vmlal.s32 <h5=q11,<fg23_2=d12,<fg23=d5
vmlal.s32 q11,d12,d5
# qhasm: h5[0,1] += fg67[0] signed* fg89_19_38[2];h5[2,3] += fg67[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h5=reg128#12,<fg67=reg128#5%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h5=q11,<fg67=d8,<fg89_19_38=d1
vmlal.s32 q11,d8,d1
# qhasm: h5[0,1] += fg67_2[2] signed* fg89_19_38[0];h5[2,3] += fg67_2[3] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h5=reg128#12,<fg67_2=reg128#10%top,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h5=q11,<fg67_2=d19,<fg89_19_38=d0
vmlal.s32 q11,d19,d0
# qhasm: h6[0,1] = fg01_2[0] signed* fg67[0];h6[2,3] = fg01_2[1] signed* fg67[1]
# asm 1: vmull.s32 >h6=reg128#16,<fg01_2=reg128#6%bot,<fg67=reg128#5%bot
# asm 2: vmull.s32 >h6=q15,<fg01_2=d10,<fg67=d8
vmull.s32 q15,d10,d8
# qhasm: h6[0,1] += fg01_2[2] signed* fg45_2[2];h6[2,3] += fg01_2[3] signed* fg45_2[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg01_2=reg128#6%top,<fg45_2=reg128#9%top
# asm 2: vmlal.s32 <h6=q15,<fg01_2=d11,<fg45_2=d17
vmlal.s32 q15,d11,d17
# qhasm: h6[0,1] += fg23_2[0] signed* fg45[0];h6[2,3] += fg23_2[1] signed* fg45[1]
# asm 1: vmlal.s32 <h6=reg128#16,<fg23_2=reg128#7%bot,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h6=q15,<fg23_2=d12,<fg45=d6
vmlal.s32 q15,d12,d6
# qhasm: h6[0,1] += fg23_2[2] signed* fg23[2];h6[2,3] += fg23_2[3] signed* fg23[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg23_2=reg128#7%top,<fg23=reg128#3%top
# asm 2: vmlal.s32 <h6=q15,<fg23_2=d13,<fg23=d5
vmlal.s32 q15,d13,d5
# qhasm: h6[0,1] += fg67_2[2] signed* fg89_19_38[2];h6[2,3] += fg67_2[3] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h6=reg128#16,<fg67_2=reg128#10%top,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h6=q15,<fg67_2=d19,<fg89_19_38=d1
vmlal.s32 q15,d19,d1
# qhasm: h6[0,1] += fg89[0] signed* fg89_19_38[0];h6[2,3] += fg89[1] signed* fg89_19_38[1]
# asm 1: vmlal.s32 <h6=reg128#16,<fg89=reg128#8%bot,<fg89_19_38=reg128#1%bot
# asm 2: vmlal.s32 <h6=q15,<fg89=d14,<fg89_19_38=d0
vmlal.s32 q15,d14,d0
# qhasm: h7[0,1] = fg01_2[0] signed* fg67[2];h7[2,3] = fg01_2[1] signed* fg67[3]
# asm 1: vmull.s32 >h7=reg128#3,<fg01_2=reg128#6%bot,<fg67=reg128#5%top
# asm 2: vmull.s32 >h7=q2,<fg01_2=d10,<fg67=d9
vmull.s32 q2,d10,d9
# qhasm: h7[0,1] += fg01_2[2] signed* fg67[0];h7[2,3] += fg01_2[3] signed* fg67[1]
# asm 1: vmlal.s32 <h7=reg128#3,<fg01_2=reg128#6%top,<fg67=reg128#5%bot
# asm 2: vmlal.s32 <h7=q2,<fg01_2=d11,<fg67=d8
vmlal.s32 q2,d11,d8
# qhasm: h7[0,1] += fg23_2[0] signed* fg45[2];h7[2,3] += fg23_2[1] signed* fg45[3]
# asm 1: vmlal.s32 <h7=reg128#3,<fg23_2=reg128#7%bot,<fg45=reg128#4%top
# asm 2: vmlal.s32 <h7=q2,<fg23_2=d12,<fg45=d7
vmlal.s32 q2,d12,d7
# qhasm: h7[0,1] += fg23_2[2] signed* fg45[0];h7[2,3] += fg23_2[3] signed* fg45[1]
# asm 1: vmlal.s32 <h7=reg128#3,<fg23_2=reg128#7%top,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h7=q2,<fg23_2=d13,<fg45=d6
vmlal.s32 q2,d13,d6
# qhasm: h7[0,1] += fg89[0] signed* fg89_19_38[2];h7[2,3] += fg89[1] signed* fg89_19_38[3]
# asm 1: vmlal.s32 <h7=reg128#3,<fg89=reg128#8%bot,<fg89_19_38=reg128#1%top
# asm 2: vmlal.s32 <h7=q2,<fg89=d14,<fg89_19_38=d1
vmlal.s32 q2,d14,d1
# qhasm: h8[0,1] = fg89[2] signed* fg89_19_38[2];h8[2,3] = fg89[3] signed* fg89_19_38[3]
# asm 1: vmull.s32 >h8=reg128#1,<fg89=reg128#8%top,<fg89_19_38=reg128#1%top
# asm 2: vmull.s32 >h8=q0,<fg89=d15,<fg89_19_38=d1
vmull.s32 q0,d15,d1
# qhasm: h8[0,1] += fg01_2[0] signed* fg89[0];h8[2,3] += fg01_2[1] signed* fg89[1]
# asm 1: vmlal.s32 <h8=reg128#1,<fg01_2=reg128#6%bot,<fg89=reg128#8%bot
# asm 2: vmlal.s32 <h8=q0,<fg01_2=d10,<fg89=d14
vmlal.s32 q0,d10,d14
# qhasm: h8[0,1] += fg01_2[2] signed* fg67_2[2];h8[2,3] += fg01_2[3] signed* fg67_2[3]
# asm 1: vmlal.s32 <h8=reg128#1,<fg01_2=reg128#6%top,<fg67_2=reg128#10%top
# asm 2: vmlal.s32 <h8=q0,<fg01_2=d11,<fg67_2=d19
vmlal.s32 q0,d11,d19
# qhasm: h8[0,1] += fg23_2[0] signed* fg67[0];h8[2,3] += fg23_2[1] signed* fg67[1]
# asm 1: vmlal.s32 <h8=reg128#1,<fg23_2=reg128#7%bot,<fg67=reg128#5%bot
# asm 2: vmlal.s32 <h8=q0,<fg23_2=d12,<fg67=d8
vmlal.s32 q0,d12,d8
# qhasm: h8[0,1] += fg23_2[2] signed* fg45_2[2];h8[2,3] += fg23_2[3] signed* fg45_2[3]
# asm 1: vmlal.s32 <h8=reg128#1,<fg23_2=reg128#7%top,<fg45_2=reg128#9%top
# asm 2: vmlal.s32 <h8=q0,<fg23_2=d13,<fg45_2=d17
vmlal.s32 q0,d13,d17
# qhasm: h8[0,1] += fg45[0] signed* fg45[0];h8[2,3] += fg45[1] signed* fg45[1]
# asm 1: vmlal.s32 <h8=reg128#1,<fg45=reg128#4%bot,<fg45=reg128#4%bot
# asm 2: vmlal.s32 <h8=q0,<fg45=d6,<fg45=d6
vmlal.s32 q0,d6,d6
# qhasm: ptr = &_0x2000000_stack
# asm 1: lea >ptr=int32#3,<_0x2000000_stack=stack128#1
# asm 2: lea >ptr=r2,<_0x2000000_stack=[sp,#512]
add r2,sp,#512
# qhasm: _0x2000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x2000000=reg128#10%bot->_0x2000000=reg128#10%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x2000000=d18->_0x2000000=d19},[<ptr=r2,: 128]
vld1.8 {d18-d19},[r2,: 128]
# qhasm: h9[0,1] = fg45_2[0] signed* fg45[2];h9[2,3] = fg45_2[1] signed* fg45[3]
# asm 1: vmull.s32 >h9=reg128#4,<fg45_2=reg128#9%bot,<fg45=reg128#4%top
# asm 2: vmull.s32 >h9=q3,<fg45_2=d16,<fg45=d7
vmull.s32 q3,d16,d7
# qhasm: h9[0,1] += fg01_2[0] signed* fg89[2];h9[2,3] += fg01_2[1] signed* fg89[3]
# asm 1: vmlal.s32 <h9=reg128#4,<fg01_2=reg128#6%bot,<fg89=reg128#8%top
# asm 2: vmlal.s32 <h9=q3,<fg01_2=d10,<fg89=d15
vmlal.s32 q3,d10,d15
# qhasm: h9[0,1] += fg01_2[2] signed* fg89[0];h9[2,3] += fg01_2[3] signed* fg89[1]
# asm 1: vmlal.s32 <h9=reg128#4,<fg01_2=reg128#6%top,<fg89=reg128#8%bot
# asm 2: vmlal.s32 <h9=q3,<fg01_2=d11,<fg89=d14
vmlal.s32 q3,d11,d14
# qhasm: h9[0,1] += fg23_2[0] signed* fg67[2];h9[2,3] += fg23_2[1] signed* fg67[3]
# asm 1: vmlal.s32 <h9=reg128#4,<fg23_2=reg128#7%bot,<fg67=reg128#5%top
# asm 2: vmlal.s32 <h9=q3,<fg23_2=d12,<fg67=d9
vmlal.s32 q3,d12,d9
# qhasm: h9[0,1] += fg23_2[2] signed* fg67[0];h9[2,3] += fg23_2[3] signed* fg67[1]
# asm 1: vmlal.s32 <h9=reg128#4,<fg23_2=reg128#7%top,<fg67=reg128#5%bot
# asm 2: vmlal.s32 <h9=q3,<fg23_2=d13,<fg67=d8
vmlal.s32 q3,d13,d8
# qhasm: ptr = &_0x1000000_stack
# asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
# asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
add r2,sp,#528
# qhasm: _0x1000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x1000000=reg128#5%bot->_0x1000000=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x1000000=d8->_0x1000000=d9},[<ptr=r2,: 128]
vld1.8 {d8-d9},[r2,: 128]
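# Note (assumption, not from the original source): the same 10-limb carry
# chain as above now appears to be applied to the squaring outputs
# h0,...,h9, again running two independent reductions per 64-bit lane.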
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#6,<h0=reg128#13,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t0=q5,<h0=q12,<_0x2000000=q9
vadd.i64 q5,q12,q9
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#16,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t6=q6,<h6=q15,<_0x2000000=q9
vadd.i64 q6,q15,q9
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#6,<t0=reg128#6,#26
# asm 2: vshr.s64 >c0=q5,<t0=q5,#26
vshr.s64 q5,q5,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#8,<h1=reg128#11,<c0=reg128#6
# asm 2: vadd.i64 >h1=q7,<h1=q10,<c0=q5
vadd.i64 q7,q10,q5
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#6,<c0=reg128#6,#26
# asm 2: vshl.i64 >t0=q5,<c0=q5,#26
vshl.i64 q5,q5,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#9,<h1=reg128#8,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t1=q8,<h1=q7,<_0x1000000=q4
vadd.i64 q8,q7,q4
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#3,<h7=reg128#3,<c6=reg128#7
# asm 2: vadd.i64 >h7=q2,<h7=q2,<c6=q6
vadd.i64 q2,q2,q6
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#7,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q6,<c6=q6,#26
vshl.i64 q6,q6,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#11,<h7=reg128#3,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t7=q10,<h7=q2,<_0x1000000=q4
vadd.i64 q10,q2,q4
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#6,<h0=reg128#13,<t0=reg128#6
# asm 2: vsub.i64 >h0=q5,<h0=q12,<t0=q5
vsub.i64 q5,q12,q5
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#9,<t1=reg128#9,#25
# asm 2: vshr.s64 >c1=q8,<t1=q8,#25
vshr.s64 q8,q8,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#7,<h6=reg128#16,<t6=reg128#7
# asm 2: vsub.i64 >h6=q6,<h6=q15,<t6=q6
vsub.i64 q6,q15,q6
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#11,<t7=reg128#11,#25
# asm 2: vshr.s64 >c7=q10,<t7=q10,#25
vshr.s64 q10,q10,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#13,<h2=reg128#14,<c1=reg128#9
# asm 2: vadd.i64 >h2=q12,<h2=q13,<c1=q8
vadd.i64 q12,q13,q8
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#9,<c1=reg128#9,#25
# asm 2: vshl.i64 >t1=q8,<c1=q8,#25
vshl.i64 q8,q8,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#14,<h2=reg128#13,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t2=q13,<h2=q12,<_0x2000000=q9
vadd.i64 q13,q12,q9
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#1,<h8=reg128#1,<c7=reg128#11
# asm 2: vadd.i64 >h8=q0,<h8=q0,<c7=q10
vadd.i64 q0,q0,q10
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#8,<h1=reg128#8,<t1=reg128#9
# asm 2: vsub.i64 >h1=q7,<h1=q7,<t1=q8
vsub.i64 q7,q7,q8
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#9,<t2=reg128#14,#26
# asm 2: vshr.s64 >c2=q8,<t2=q13,#26
vshr.s64 q8,q13,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#11,<c7=reg128#11,#25
# asm 2: vshl.i64 >t7=q10,<c7=q10,#25
vshl.i64 q10,q10,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#14,<h8=reg128#1,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t8=q13,<h8=q0,<_0x2000000=q9
vadd.i64 q13,q0,q9
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#2,<h3=reg128#2,<c2=reg128#9
# asm 2: vadd.i64 >h3=q1,<h3=q1,<c2=q8
vadd.i64 q1,q1,q8
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#9,<c2=reg128#9,#26
# asm 2: vshl.i64 >t2=q8,<c2=q8,#26
vshl.i64 q8,q8,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#2,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t3=q15,<h3=q1,<_0x1000000=q4
vadd.i64 q15,q1,q4
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#3,<h7=reg128#3,<t7=reg128#11
# asm 2: vsub.i64 >h7=q2,<h7=q2,<t7=q10
vsub.i64 q2,q2,q10
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#11,<t8=reg128#14,#26
# asm 2: vshr.s64 >c8=q10,<t8=q13,#26
vshr.s64 q10,q13,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#9,<h2=reg128#13,<t2=reg128#9
# asm 2: vsub.i64 >h2=q8,<h2=q12,<t2=q8
vsub.i64 q8,q12,q8
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#13,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q12,<t3=q15,#25
vshr.s64 q12,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#4,<h9=reg128#4,<c8=reg128#11
# asm 2: vadd.i64 >h9=q3,<h9=q3,<c8=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#11,<c8=reg128#11,#26
# asm 2: vshl.i64 >t8=q10,<c8=q10,#26
vshl.i64 q10,q10,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#14,<h9=reg128#4,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t9=q13,<h9=q3,<_0x1000000=q4
vadd.i64 q13,q3,q4
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#15,<h4=reg128#15,<c3=reg128#13
# asm 2: vadd.i64 >h4=q14,<h4=q14,<c3=q12
vadd.i64 q14,q14,q12
# qhasm: posh = playground1_ptr + 144
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >posh=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#13,<c3=reg128#13,#25
# asm 2: vshl.i64 >t3=q12,<c3=q12,#25
vshl.i64 q12,q12,#25
# qhasm: posH = playground1_ptr + 192
# asm 1: add >posH=int32#5,<playground1_ptr=int32#4,#192
# asm 2: add >posH=r4,<playground1_ptr=r3,#192
add r4,r3,#192
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#15,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t4=q15,<h4=q14,<_0x2000000=q9
vadd.i64 q15,q14,q9
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#1,<h8=reg128#1,<t8=reg128#11
# asm 2: vsub.i64 >h8=q0,<h8=q0,<t8=q10
vsub.i64 q0,q0,q10
# qhasm: posH+=8
# asm 1: add >posH=int32#5,<posH=int32#5,#8
# asm 2: add >posH=r4,<posH=r4,#8
add r4,r4,#8
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#11,<t9=reg128#14,#25
# asm 2: vshr.s64 >c9=q10,<t9=q13,#25
vshr.s64 q10,q13,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#2,<h3=reg128#2,<t3=reg128#13
# asm 2: vsub.i64 >h3=q1,<h3=q1,<t3=q12
vsub.i64 q1,q1,q12
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#13,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q12,<t4=q15,#26
vshr.s64 q12,q15,#26
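# note: the carry out of h9 wraps to h0 multiplied by 19 (since
# 2^255 = 19 mod 2^255-19); the factor 19 is assembled below as
# c9+c9, plus c9<<4, plus c9 (2 + 16 + 1 = 19).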
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#14,<c9=reg128#11,<c9=reg128#11
# asm 2: vadd.i64 >s=q13,<c9=q10,<c9=q10
vadd.i64 q13,q10,q10
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#12,<h5=reg128#12,<c4=reg128#13
# asm 2: vadd.i64 >h5=q11,<h5=q11,<c4=q12
vadd.i64 q11,q11,q12
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#9%bot,<h3=reg128#2%bot
# asm 2: vtrn.32 <h2=d16,<h3=d2
vtrn.32 d16,d2
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#13,<c4=reg128#13,#26
# asm 2: vshl.i64 >t4=q12,<c4=q12,#26
vshl.i64 q12,q12,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#9%top,<h3=reg128#2%top
# asm 2: vtrn.32 <h2=d17,<h3=d3
vtrn.32 d17,d3
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#2,<h5=reg128#12,<_0x1000000=reg128#5
# asm 2: vadd.i64 >t5=q1,<h5=q11,<_0x1000000=q4
vadd.i64 q1,q11,q4
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#6,<s=reg128#14
# asm 2: vadd.i64 >h0=q4,<h0=q5,<s=q13
vadd.i64 q4,q5,q13
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#9%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d16,[<posh=r2,: 64]!
vst1.8 d16,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#6,<c9=reg128#11,#4
# asm 2: vshl.i64 >s=q5,<c9=q10,#4
vshl.i64 q5,q10,#4
# qhasm: mem64[posH] aligned= h2[1];posH+=8
# asm 1: vst1.8 <h2=reg128#9%top,[<posH=int32#5,: 64]!
# asm 2: vst1.8 <h2=d17,[<posH=r4,: 64]!
vst1.8 d17,[r4,: 64]!
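# note: the vtrn.32/vst1.8 pairs de-interleave the two parallel
# results: bottom d-halves are written through posh (offset 144), top
# halves through posH (offset 192).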
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#9,<h4=reg128#15,<t4=reg128#13
# asm 2: vsub.i64 >h4=q8,<h4=q14,<t4=q12
vsub.i64 q8,q14,q12
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#2,<t5=reg128#2,#25
# asm 2: vshr.s64 >c5=q1,<t5=q1,#25
vshr.s64 q1,q1,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<s=reg128#6
# asm 2: vadd.i64 >h0=q4,<h0=q4,<s=q5
vadd.i64 q4,q4,q5
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#6,<h6=reg128#7,<c5=reg128#2
# asm 2: vadd.i64 >h6=q5,<h6=q6,<c5=q1
vadd.i64 q5,q6,q1
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#2,<c5=reg128#2,#25
# asm 2: vshl.i64 >t5=q1,<c5=q1,#25
vshl.i64 q1,q1,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#6,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t6=q6,<h6=q5,<_0x2000000=q9
vadd.i64 q6,q5,q9
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#5,<h0=reg128#5,<c9=reg128#11
# asm 2: vadd.i64 >h0=q4,<h0=q4,<c9=q10
vadd.i64 q4,q4,q10
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#11,<c9=reg128#11,#25
# asm 2: vshl.i64 >t9=q10,<c9=q10,#25
vshl.i64 q10,q10,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#10,<h0=reg128#5,<_0x2000000=reg128#10
# asm 2: vadd.i64 >t0=q9,<h0=q4,<_0x2000000=q9
vadd.i64 q9,q4,q9
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#2,<h5=reg128#12,<t5=reg128#2
# asm 2: vsub.i64 >h5=q1,<h5=q11,<t5=q1
vsub.i64 q1,q11,q1
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#4,<h9=reg128#4,<t9=reg128#11
# asm 2: vsub.i64 >h9=q3,<h9=q3,<t9=q10
vsub.i64 q3,q3,q10
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#9%bot,<h5=reg128#2%bot
# asm 2: vtrn.32 <h4=d16,<h5=d2
vtrn.32 d16,d2
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#10,<t0=reg128#10,#26
# asm 2: vshr.s64 >c0=q9,<t0=q9,#26
vshr.s64 q9,q9,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#9%top,<h5=reg128#2%top
# asm 2: vtrn.32 <h4=d17,<h5=d3
vtrn.32 d17,d3
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#2,<h7=reg128#3,<c6=reg128#7
# asm 2: vadd.i64 >h7=q1,<h7=q2,<c6=q6
vadd.i64 q1,q2,q6
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#9%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d16,[<posh=r2,: 64]
vst1.8 d16,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#3,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q2,<c6=q6,#26
vshl.i64 q2,q6,#26
# qhasm: mem64[posH] aligned= h4[1]
# asm 1: vst1.8 <h4=reg128#9%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h4=d17,[<posH=r4,: 64]
vst1.8 d17,[r4,: 64]
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#7,<h1=reg128#8,<c0=reg128#10
# asm 2: vadd.i64 >h1=q6,<h1=q7,<c0=q9
vadd.i64 q6,q7,q9
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#1%bot,<h9=reg128#4%bot
# asm 2: vtrn.32 <h8=d0,<h9=d6
vtrn.32 d0,d6
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#8,<c0=reg128#10,#26
# asm 2: vshl.i64 >t0=q7,<c0=q9,#26
vshl.i64 q7,q9,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#1%top,<h9=reg128#4%top
# asm 2: vtrn.32 <h8=d1,<h9=d7
vtrn.32 d1,d7
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#3,<h6=reg128#6,<t6=reg128#3
# asm 2: vsub.i64 >h6=q2,<h6=q5,<t6=q2
vsub.i64 q2,q5,q2
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#4,<h0=reg128#5,<t0=reg128#8
# asm 2: vsub.i64 >h0=q3,<h0=q4,<t0=q7
vsub.i64 q3,q4,q7
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: posH+=16
# asm 1: add >posH=int32#5,<posH=int32#5,#16
# asm 2: add >posH=r4,<posH=r4,#16
add r4,r4,#16
# qhasm: mem64[posH] aligned= h8[1]
# asm 1: vst1.8 <h8=reg128#1%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h8=d1,[<posH=r4,: 64]
vst1.8 d1,[r4,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#3%bot,<h7=reg128#2%bot
# asm 2: vtrn.32 <h6=d4,<h7=d2
vtrn.32 d4,d2
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#3%top,<h7=reg128#2%top
# asm 2: vtrn.32 <h6=d5,<h7=d3
vtrn.32 d5,d3
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: posH-=8
# asm 1: sub >posH=int32#5,<posH=int32#5,#8
# asm 2: sub >posH=r4,<posH=r4,#8
sub r4,r4,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#4%bot,<h1=reg128#7%bot
# asm 2: vtrn.32 <h0=d6,<h1=d12
vtrn.32 d6,d12
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#4%top,<h1=reg128#7%top
# asm 2: vtrn.32 <h0=d7,<h1=d13
vtrn.32 d7,d13
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: mem64[posH] aligned= h6[1]
# asm 1: vst1.8 <h6=reg128#3%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h6=d5,[<posH=r4,: 64]
vst1.8 d5,[r4,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: posH-=24
# asm 1: sub >posH=int32#5,<posH=int32#5,#24
# asm 2: sub >posH=r4,<posH=r4,#24
sub r4,r4,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#4%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d6,[<posh=r2,: 64]
vst1.8 d6,[r2,: 64]
# qhasm: mem64[posH] aligned= h0[1]
# asm 1: vst1.8 <h0=reg128#4%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h0=d7,[<posH=r4,: 64]
vst1.8 d7,[r4,: 64]
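# note: next is a 10-limb field addition, f += g, on the elements at
# playground offsets 336 and 288, processed as 4+4+2 lanes per vector
# operation; the sum is written back to offset 288.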
# qhasm: pos1 = playground1_ptr + 336
# asm 1: add >pos1=int32#3,<playground1_ptr=int32#4,#336
# asm 2: add >pos1=r2,<playground1_ptr=r3,#336
add r2,r3,#336
# qhasm: pos2 = playground1_ptr + 288
# asm 1: add >pos2=int32#5,<playground1_ptr=int32#4,#288
# asm 2: add >pos2=r4,<playground1_ptr=r3,#288
add r4,r3,#288
# qhasm: f0 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<pos1=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: g0 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g0=reg128#2%bot->g0=reg128#2%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g0=d2->g0=d3},[<pos2=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: 4x f0 += g0
# asm 1: vadd.i32 >f0=reg128#1,<f0=reg128#1,<g0=reg128#2
# asm 2: vadd.i32 >f0=q0,<f0=q0,<g0=q1
vadd.i32 q0,q0,q1
# qhasm: f4 aligned= mem128[pos1];pos1 += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<pos1=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<pos1=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: g4 aligned= mem128[pos2];pos2 += 16
# asm 1: vld1.8 {>g4=reg128#3%bot->g4=reg128#3%top},[<pos2=int32#5,: 128]!
# asm 2: vld1.8 {>g4=d4->g4=d5},[<pos2=r4,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
# qhasm: 4x f4 += g4
# asm 1: vadd.i32 >f4=reg128#2,<f4=reg128#2,<g4=reg128#3
# asm 2: vadd.i32 >f4=q1,<f4=q1,<g4=q2
vadd.i32 q1,q1,q2
# qhasm: pos0 = playground1_ptr + 288
# asm 1: add >pos0=int32#6,<playground1_ptr=int32#4,#288
# asm 2: add >pos0=r5,<playground1_ptr=r3,#288
add r5,r3,#288
# qhasm: new f8
# qhasm: new g8
# qhasm: f8 aligned= mem64[pos1] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<pos1=int32#3,: 64]
# asm 2: vld1.8 {<f8=d4},[<pos1=r2,: 64]
vld1.8 {d4},[r2,: 64]
# qhasm: g8 aligned= mem64[pos2] g8[1]
# asm 1: vld1.8 {<g8=reg128#4%bot},[<pos2=int32#5,: 64]
# asm 2: vld1.8 {<g8=d6},[<pos2=r4,: 64]
vld1.8 {d6},[r4,: 64]
# qhasm: 4x f8 += g8
# asm 1: vadd.i32 >f8=reg128#3,<f8=reg128#3,<g8=reg128#4
# asm 2: vadd.i32 >f8=q2,<f8=q2,<g8=q3
vadd.i32 q2,q2,q3
# qhasm: mem128[pos0] aligned= f0;pos0 += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<pos0=int32#6,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<pos0=r5,: 128]!
vst1.8 {d0-d1},[r5,: 128]!
# qhasm: mem128[pos0] aligned= f4;pos0 += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<pos0=int32#6,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<pos0=r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
# qhasm: mem64[pos0] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<pos0=int32#6,: 64]
# asm 2: vst1.8 <f8=d4,[<pos0=r5,: 64]
vst1.8 d4,[r5,: 64]
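# note: the remainder of this block computes two field multiplications
# in parallel (operands at playground offsets 48 and 240, and 144 and
# 288); the vtrn.32 steps interleave the limb vectors so that, e.g.,
# f02 holds limbs 0 and 2 of both multiplicands, one per lane pair.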
# qhasm: posf = playground1_ptr + 48
# asm 1: add >posf=int32#3,<playground1_ptr=int32#4,#48
# asm 2: add >posf=r2,<playground1_ptr=r3,#48
add r2,r3,#48
# qhasm: posg = playground1_ptr + 144
# asm 1: add >posg=int32#5,<playground1_ptr=int32#4,#144
# asm 2: add >posg=r4,<playground1_ptr=r3,#144
add r4,r3,#144
# qhasm: g02 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g02=reg128#1%bot->g02=reg128#1%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g02=d0->g02=d1},[<posg=r4,: 128]!
vld1.8 {d0-d1},[r4,: 128]!
# qhasm: g46 aligned= mem128[posg];posg += 16
# asm 1: vld1.8 {>g46=reg128#2%bot->g46=reg128#2%top},[<posg=int32#5,: 128]!
# asm 2: vld1.8 {>g46=d2->g46=d3},[<posg=r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
# qhasm: new g89
# qhasm: g89 aligned= mem64[posg] g89[1]
# asm 1: vld1.8 {<g89=reg128#3%bot},[<posg=int32#5,: 64]
# asm 2: vld1.8 {<g89=d4},[<posg=r4,: 64]
vld1.8 {d4},[r4,: 64]
# qhasm: posG = playground1_ptr + 288
# asm 1: add >posG=int32#5,<playground1_ptr=int32#4,#288
# asm 2: add >posG=r4,<playground1_ptr=r3,#288
add r4,r3,#288
# qhasm: g13 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g13=reg128#4%bot->g13=reg128#4%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g13=d6->g13=d7},[<posG=r4,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
# qhasm: g02 g13 = g02[0]g13[0] g02[2]g13[2] g02[1]g13[1] g02[3]g13[3]
# asm 1: vtrn.32 <g02=reg128#1,<g13=reg128#4
# asm 2: vtrn.32 <g02=q0,<g13=q3
vtrn.32 q0,q3
# qhasm: g57 aligned= mem128[posG];posG += 16
# asm 1: vld1.8 {>g57=reg128#5%bot->g57=reg128#5%top},[<posG=int32#5,: 128]!
# asm 2: vld1.8 {>g57=d8->g57=d9},[<posG=r4,: 128]!
vld1.8 {d8-d9},[r4,: 128]!
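# note: each gXX_19 value built below is 19*gXX, formed as (gXX << 4)
# plus three further additions of gXX (16 + 3 = 19); these scaled limbs
# supply the 2^255 = 19 wraparound terms of the product. mix starts as
# 19*g02 and later has its bottom half replaced from f89_2.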
# qhasm: 4x mix = g02 << 4
# asm 1: vshl.i32 >mix=reg128#6,<g02=reg128#1,#4
# asm 2: vshl.i32 >mix=q5,<g02=q0,#4
vshl.i32 q5,q0,#4
# qhasm: g46 g57 = g46[0]g57[0] g46[2]g57[2] g46[1]g57[1] g46[3]g57[3]
# asm 1: vtrn.32 <g46=reg128#2,<g57=reg128#5
# asm 2: vtrn.32 <g46=q1,<g57=q4
vtrn.32 q1,q4
# qhasm: 4x g13_19 = g13 << 4
# asm 1: vshl.i32 >g13_19=reg128#7,<g13=reg128#4,#4
# asm 2: vshl.i32 >g13_19=q6,<g13=q3,#4
vshl.i32 q6,q3,#4
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: 4x g46_19 = g46 << 4
# asm 1: vshl.i32 >g46_19=reg128#8,<g46=reg128#2,#4
# asm 2: vshl.i32 >g46_19=q7,<g46=q1,#4
vshl.i32 q7,q1,#4
# qhasm: g89 aligned= g89[0] mem64[posG]
# asm 1: vld1.8 {<g89=reg128#3%top},[<posG=int32#5,: 64]
# asm 2: vld1.8 {<g89=d5},[<posG=r4,: 64]
vld1.8 {d5},[r4,: 64]
# qhasm: 4x g57_19 = g57 << 4
# asm 1: vshl.i32 >g57_19=reg128#9,<g57=reg128#5,#4
# asm 2: vshl.i32 >g57_19=q8,<g57=q4,#4
vshl.i32 q8,q4,#4
# qhasm: g89 = g89[0] g89[2] g89[1] g89[3]
# asm 1: vtrn.32 <g89=reg128#3%bot,<g89=reg128#3%top
# asm 2: vtrn.32 <g89=d4,<g89=d5
vtrn.32 d4,d5
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f02 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f02=reg128#10%bot->f02=reg128#10%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f02=d18->f02=d19},[<posf=r2,: 128]!
vld1.8 {d18-d19},[r2,: 128]!
# qhasm: 4x g89_19 = g89 << 4
# asm 1: vshl.i32 >g89_19=reg128#11,<g89=reg128#3,#4
# asm 2: vshl.i32 >g89_19=q10,<g89=q2,#4
vshl.i32 q10,q2,#4
# qhasm: f46 aligned= mem128[posf];posf += 16
# asm 1: vld1.8 {>f46=reg128#12%bot->f46=reg128#12%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f46=d22->f46=d23},[<posf=r2,: 128]!
vld1.8 {d22-d23},[r2,: 128]!
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: new f89
# qhasm: f89 aligned= mem64[posf] f89[1]
# asm 1: vld1.8 {<f89=reg128#13%bot},[<posf=int32#3,: 64]
# asm 2: vld1.8 {<f89=d24},[<posf=r2,: 64]
vld1.8 {d24},[r2,: 64]
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: posF = playground1_ptr + 240
# asm 1: add >posF=int32#3,<playground1_ptr=int32#4,#240
# asm 2: add >posF=r2,<playground1_ptr=r3,#240
add r2,r3,#240
# qhasm: f13 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f13=reg128#14%bot->f13=reg128#14%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f13=d26->f13=d27},[<posF=r2,: 128]!
vld1.8 {d26-d27},[r2,: 128]!
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: f57 aligned= mem128[posF];posF += 16
# asm 1: vld1.8 {>f57=reg128#15%bot->f57=reg128#15%top},[<posF=int32#3,: 128]!
# asm 2: vld1.8 {>f57=d28->f57=d29},[<posF=r2,: 128]!
vld1.8 {d28-d29},[r2,: 128]!
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: f89 aligned= f89[0] mem64[posF]
# asm 1: vld1.8 {<f89=reg128#13%top},[<posF=int32#3,: 64]
# asm 2: vld1.8 {<f89=d25},[<posF=r2,: 64]
vld1.8 {d25},[r2,: 64]
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f02 f13 = f02[0]f13[0] f02[2]f13[2] f02[1]f13[1] f02[3]f13[3]
# asm 1: vtrn.32 <f02=reg128#10,<f13=reg128#14
# asm 2: vtrn.32 <f02=q9,<f13=q13
vtrn.32 q9,q13
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: 4x mix += g02
# asm 1: vadd.i32 >mix=reg128#6,<mix=reg128#6,<g02=reg128#1
# asm 2: vadd.i32 >mix=q5,<mix=q5,<g02=q0
vadd.i32 q5,q5,q0
# qhasm: f46 f57 = f46[0]f57[0] f46[2]f57[2] f46[1]f57[1] f46[3]f57[3]
# asm 1: vtrn.32 <f46=reg128#12,<f57=reg128#15
# asm 2: vtrn.32 <f46=q11,<f57=q14
vtrn.32 q11,q14
# qhasm: 4x g13_19 += g13
# asm 1: vadd.i32 >g13_19=reg128#7,<g13_19=reg128#7,<g13=reg128#4
# asm 2: vadd.i32 >g13_19=q6,<g13_19=q6,<g13=q3
vadd.i32 q6,q6,q3
# qhasm: new g13_19_stack
# qhasm: ptr = &g13_19_stack
# asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
# asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
add r2,sp,#560
# qhasm: 4x g89_19 += g89
# asm 1: vadd.i32 >g89_19=reg128#11,<g89_19=reg128#11,<g89=reg128#3
# asm 2: vadd.i32 >g89_19=q10,<g89_19=q10,<g89=q2
vadd.i32 q10,q10,q2
# qhasm: f89 = f89[0] f89[2] f89[1] f89[3]
# asm 1: vtrn.32 <f89=reg128#13%bot,<f89=reg128#13%top
# asm 2: vtrn.32 <f89=d24,<f89=d25
vtrn.32 d24,d25
# qhasm: mem128[ptr] aligned= g13_19
# asm 1: vst1.8 {<g13_19=reg128#7%bot-<g13_19=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g13_19=d12-<g13_19=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
# qhasm: 4x f13_2 = f13 << 1
# asm 1: vshl.i32 >f13_2=reg128#7,<f13=reg128#14,#1
# asm 2: vshl.i32 >f13_2=q6,<f13=q13,#1
vshl.i32 q6,q13,#1
# qhasm: new g89_19_stack
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= g89_19
# asm 1: vst1.8 {<g89_19=reg128#11%bot-<g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g89_19=d20-<g89_19=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: 4x f57_2 = f57 << 1
# asm 1: vshl.i32 >f57_2=reg128#11,<f57=reg128#15,#1
# asm 2: vshl.i32 >f57_2=q10,<f57=q14,#1
vshl.i32 q10,q14,#1
# qhasm: new f13_2_stack
# qhasm: ptr = &f13_2_stack
# asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
# asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
add r2,sp,#592
# qhasm: mem128[ptr] aligned= f13_2
# asm 1: vst1.8 {<f13_2=reg128#7%bot-<f13_2=reg128#7%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f13_2=d12-<f13_2=d13},[<ptr=r2,: 128]
vst1.8 {d12-d13},[r2,: 128]
# qhasm: 4x f89_2 = f89 << 1
# asm 1: vshl.i32 >f89_2=reg128#16,<f89=reg128#13,#1
# asm 2: vshl.i32 >f89_2=q15,<f89=q12,#1
vshl.i32 q15,q12,#1
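# note: the fXX_2 values are 2*fXX; in the radix-2^25.5 representation
# a product of two odd-indexed limbs carries an extra factor of 2,
# folded here into the f operand.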
# qhasm: 4x g57_19 += g57
# asm 1: vadd.i32 >g57_19=reg128#9,<g57_19=reg128#9,<g57=reg128#5
# asm 2: vadd.i32 >g57_19=q8,<g57_19=q8,<g57=q4
vadd.i32 q8,q8,q4
# qhasm: mix = f89_2[2,3] mix[2,3]
# asm 1: vext.32 <mix=reg128#6%bot,<f89_2=reg128#16%top,<f89_2=reg128#16%bot,#0
# asm 2: vext.32 <mix=d10,<f89_2=d31,<f89_2=d30,#0
vext.32 d10,d31,d30,#0
# qhasm: 4x g46_19 += g46
# asm 1: vadd.i32 >g46_19=reg128#8,<g46_19=reg128#8,<g46=reg128#2
# asm 2: vadd.i32 >g46_19=q7,<g46_19=q7,<g46=q1
vadd.i32 q7,q7,q1
# qhasm: new g57_19_stack
# qhasm: ptr = &g57_19_stack
# asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
# asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
add r2,sp,#608
# qhasm: mem128[ptr] aligned= g57_19
# asm 1: vst1.8 {<g57_19=reg128#9%bot-<g57_19=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g57_19=d16-<g57_19=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
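# note: term accumulation starts here; each coefficient hK collects the
# products fI*gJ with I+J = K, substituting the gXX_19 copies when I+J
# wraps past limb 9 and the fXX_2 copies when both indices are odd.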
# qhasm: h9[0,1] = f02[0] signed* g89[2];h9[2,3] = f02[1] signed* g89[3]
# asm 1: vmull.s32 >h9=reg128#9,<f02=reg128#10%bot,<g89=reg128#3%top
# asm 2: vmull.s32 >h9=q8,<f02=d18,<g89=d5
vmull.s32 q8,d18,d5
# qhasm: h9[0,1] += f13[0] signed* g89[0];h9[2,3] += f13[1] signed* g89[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%bot,<g89=reg128#3%bot
# asm 2: vmlal.s32 <h9=q8,<f13=d26,<g89=d4
vmlal.s32 q8,d26,d4
# qhasm: h9[0,1] += f02[2] signed* g57[2];h9[2,3] += f02[3] signed* g57[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f02=reg128#10%top,<g57=reg128#5%top
# asm 2: vmlal.s32 <h9=q8,<f02=d19,<g57=d9
vmlal.s32 q8,d19,d9
# qhasm: h9[0,1] += f13[2] signed* g46[2];h9[2,3] += f13[3] signed* g46[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f13=reg128#14%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h9=q8,<f13=d27,<g46=d3
vmlal.s32 q8,d27,d3
# qhasm: h9[0,1] += f46[0] signed* g57[0];h9[2,3] += f46[1] signed* g57[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h9=q8,<f46=d22,<g57=d8
vmlal.s32 q8,d22,d8
# qhasm: h9[0,1] += f57[0] signed* g46[0];h9[2,3] += f57[1] signed* g46[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h9=q8,<f57=d28,<g46=d2
vmlal.s32 q8,d28,d2
# qhasm: h9[0,1] += f46[2] signed* g13[2];h9[2,3] += f46[3] signed* g13[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f46=reg128#12%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h9=q8,<f46=d23,<g13=d7
vmlal.s32 q8,d23,d7
# qhasm: h9[0,1] += f57[2] signed* g02[2];h9[2,3] += f57[3] signed* g02[3]
# asm 1: vmlal.s32 <h9=reg128#9,<f57=reg128#15%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h9=q8,<f57=d29,<g02=d1
vmlal.s32 q8,d29,d1
# qhasm: h9[0,1] += f89[0] signed* g13[0];h9[2,3] += f89[1] signed* g13[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d24,<g13=d6
vmlal.s32 q8,d24,d6
# qhasm: h9[0,1] += f89[2] signed* g02[0];h9[2,3] += f89[3] signed* g02[1]
# asm 1: vmlal.s32 <h9=reg128#9,<f89=reg128#13%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h9=q8,<f89=d25,<g02=d0
vmlal.s32 q8,d25,d0
# qhasm: new g46_19_stack
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: mem128[ptr] aligned= g46_19
# asm 1: vst1.8 {<g46_19=reg128#8%bot-<g46_19=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<g46_19=d14-<g46_19=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h8[0,1] = f02[0] signed* g89[0];h8[2,3] = f02[1] signed* g89[1]
# asm 1: vmull.s32 >h8=reg128#3,<f02=reg128#10%bot,<g89=reg128#3%bot
# asm 2: vmull.s32 >h8=q2,<f02=d18,<g89=d4
vmull.s32 q2,d18,d4
# qhasm: h8[0,1] += f13_2[0] signed* g57[2];h8[2,3] += f13_2[1] signed* g57[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%bot,<g57=reg128#5%top
# asm 2: vmlal.s32 <h8=q2,<f13_2=d12,<g57=d9
vmlal.s32 q2,d12,d9
# qhasm: h8[0,1] += f13_2[2] signed* g57[0];h8[2,3] += f13_2[3] signed* g57[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f13_2=reg128#7%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h8=q2,<f13_2=d13,<g57=d8
vmlal.s32 q2,d13,d8
# qhasm: h8[0,1] += f02[2] signed* g46[2];h8[2,3] += f02[3] signed* g46[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f02=reg128#10%top,<g46=reg128#2%top
# asm 2: vmlal.s32 <h8=q2,<f02=d19,<g46=d3
vmlal.s32 q2,d19,d3
# qhasm: h8[0,1] += f46[0] signed* g46[0];h8[2,3] += f46[1] signed* g46[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h8=q2,<f46=d22,<g46=d2
vmlal.s32 q2,d22,d2
# qhasm: h8[0,1] += f46[2] signed* g02[2];h8[2,3] += f46[3] signed* g02[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f46=reg128#12%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h8=q2,<f46=d23,<g02=d1
vmlal.s32 q2,d23,d1
# qhasm: h8[0,1] += f89[0] signed* g02[0];h8[2,3] += f89[1] signed* g02[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f89=reg128#13%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h8=q2,<f89=d24,<g02=d0
vmlal.s32 q2,d24,d0
# qhasm: new f57_2_stack
# qhasm: ptr = &f57_2_stack
# asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
# asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
add r2,sp,#640
# qhasm: mem128[ptr] aligned= f57_2
# asm 1: vst1.8 {<f57_2=reg128#11%bot-<f57_2=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<f57_2=d20-<f57_2=d21},[<ptr=r2,: 128]
vst1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] = f02[0] signed* g57[2];h7[2,3] = f02[1] signed* g57[3]
# asm 1: vmull.s32 >h7=reg128#8,<f02=reg128#10%bot,<g57=reg128#5%top
# asm 2: vmull.s32 >h7=q7,<f02=d18,<g57=d9
vmull.s32 q7,d18,d9
# qhasm: h7[0,1] += f13[0] signed* g46[2];h7[2,3] += f13[1] signed* g46[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%bot,<g46=reg128#2%top
# asm 2: vmlal.s32 <h7=q7,<f13=d26,<g46=d3
vmlal.s32 q7,d26,d3
# qhasm: h7[0,1] += f02[2] signed* g57[0];h7[2,3] += f02[3] signed* g57[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f02=reg128#10%top,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h7=q7,<f02=d19,<g57=d8
vmlal.s32 q7,d19,d8
# qhasm: h7[0,1] += f13[2] signed* g46[0];h7[2,3] += f13[3] signed* g46[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f13=reg128#14%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h7=q7,<f13=d27,<g46=d2
vmlal.s32 q7,d27,d2
# qhasm: h7[0,1] += f46[0] signed* g13[2];h7[2,3] += f46[1] signed* g13[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h7=q7,<f46=d22,<g13=d7
vmlal.s32 q7,d22,d7
# qhasm: h7[0,1] += f57[0] signed* g02[2];h7[2,3] += f57[1] signed* g02[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h7=q7,<f57=d28,<g02=d1
vmlal.s32 q7,d28,d1
# qhasm: h7[0,1] += f46[2] signed* g13[0];h7[2,3] += f46[3] signed* g13[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f46=reg128#12%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h7=q7,<f46=d23,<g13=d6
vmlal.s32 q7,d23,d6
# qhasm: h7[0,1] += f57[2] signed* g02[0];h7[2,3] += f57[3] signed* g02[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f57=reg128#15%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h7=q7,<f57=d29,<g02=d0
vmlal.s32 q7,d29,d0
# qhasm: new mix_stack
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mem128[ptr] aligned= mix
# asm 1: vst1.8 {<mix=reg128#6%bot-<mix=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<mix=d10-<mix=d11},[<ptr=r2,: 128]
vst1.8 {d10-d11},[r2,: 128]
# qhasm: h6[0,1] = f02[0] signed* g46[2];h6[2,3] = f02[1] signed* g46[3]
# asm 1: vmull.s32 >h6=reg128#6,<f02=reg128#10%bot,<g46=reg128#2%top
# asm 2: vmull.s32 >h6=q5,<f02=d18,<g46=d3
vmull.s32 q5,d18,d3
# qhasm: h6[0,1] += f02[2] signed* g46[0];h6[2,3] += f02[3] signed* g46[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f02=reg128#10%top,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h6=q5,<f02=d19,<g46=d2
vmlal.s32 q5,d19,d2
# qhasm: h6[0,1] += f46[0] signed* g02[2];h6[2,3] += f46[1] signed* g02[3]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h6=q5,<f46=d22,<g02=d1
vmlal.s32 q5,d22,d1
# qhasm: h6[0,1] += f46[2] signed* g02[0];h6[2,3] += f46[3] signed* g02[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f46=reg128#12%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h6=q5,<f46=d23,<g02=d0
vmlal.s32 q5,d23,d0
# qhasm: h6[0,1] += f13_2[0] signed* g57[0];h6[2,3] += f13_2[1] signed* g57[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#7%bot,<g57=reg128#5%bot
# asm 2: vmlal.s32 <h6=q5,<f13_2=d12,<g57=d8
vmlal.s32 q5,d12,d8
# qhasm: new h9_stack
# qhasm: ptr = &h9_stack
# asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
# asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
add r2,sp,#672
# qhasm: mem128[ptr] aligned= h9
# asm 1: vst1.8 {<h9=reg128#9%bot-<h9=reg128#9%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h9=d16-<h9=d17},[<ptr=r2,: 128]
vst1.8 {d16-d17},[r2,: 128]
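# note: partial sums (h9 here, h7 and h5 below) are spilled to the
# stack to free NEON registers and reloaded once the remaining terms
# have been folded in.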
# qhasm: h5[0,1] = f02[0] signed* g57[0];h5[2,3] = f02[1] signed* g57[1]
# asm 1: vmull.s32 >h5=reg128#5,<f02=reg128#10%bot,<g57=reg128#5%bot
# asm 2: vmull.s32 >h5=q4,<f02=d18,<g57=d8
vmull.s32 q4,d18,d8
# qhasm: h5[0,1] += f13[0] signed* g46[0];h5[2,3] += f13[1] signed* g46[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%bot,<g46=reg128#2%bot
# asm 2: vmlal.s32 <h5=q4,<f13=d26,<g46=d2
vmlal.s32 q4,d26,d2
# qhasm: h5[0,1] += f02[2] signed* g13[2];h5[2,3] += f02[3] signed* g13[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f02=reg128#10%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h5=q4,<f02=d19,<g13=d7
vmlal.s32 q4,d19,d7
# qhasm: h5[0,1] += f13[2] signed* g02[2];h5[2,3] += f13[3] signed* g02[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f13=reg128#14%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h5=q4,<f13=d27,<g02=d1
vmlal.s32 q4,d27,d1
# qhasm: h5[0,1] += f46[0] signed* g13[0];h5[2,3] += f46[1] signed* g13[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h5=q4,<f46=d22,<g13=d6
vmlal.s32 q4,d22,d6
# qhasm: h5[0,1] += f57[0] signed* g02[0];h5[2,3] += f57[1] signed* g02[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d28,<g02=d0
vmlal.s32 q4,d28,d0
# qhasm: h3[0,1] = f02[0] signed* g13[2];h3[2,3] = f02[1] signed* g13[3]
# asm 1: vmull.s32 >h3=reg128#9,<f02=reg128#10%bot,<g13=reg128#4%top
# asm 2: vmull.s32 >h3=q8,<f02=d18,<g13=d7
vmull.s32 q8,d18,d7
# qhasm: h3[0,1] += f13[0] signed* g02[2];h3[2,3] += f13[1] signed* g02[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%bot,<g02=reg128#1%top
# asm 2: vmlal.s32 <h3=q8,<f13=d26,<g02=d1
vmlal.s32 q8,d26,d1
# qhasm: h3[0,1] += f02[2] signed* g13[0];h3[2,3] += f02[3] signed* g13[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f02=reg128#10%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h3=q8,<f02=d19,<g13=d6
vmlal.s32 q8,d19,d6
# qhasm: h3[0,1] += f13[2] signed* g02[0];h3[2,3] += f13[3] signed* g02[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f13=reg128#14%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h3=q8,<f13=d27,<g02=d0
vmlal.s32 q8,d27,d0
# qhasm: ptr = &g89_19_stack
# asm 1: lea >ptr=int32#3,<g89_19_stack=stack128#5
# asm 2: lea >ptr=r2,<g89_19_stack=[sp,#576]
add r2,sp,#576
# qhasm: g89_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g89_19=reg128#11%bot->g89_19=reg128#11%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g89_19=d20->g89_19=d21},[<ptr=r2,: 128]
vld1.8 {d20-d21},[r2,: 128]
# qhasm: h7[0,1] += f89[0] signed* g89_19[2];h7[2,3] += f89[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h7=q7,<f89=d24,<g89_19=d21
vmlal.s32 q7,d24,d21
# qhasm: h7[0,1] += f89[2] signed* g89_19[0];h7[2,3] += f89[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h7=reg128#8,<f89=reg128#13%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h7=q7,<f89=d25,<g89_19=d20
vmlal.s32 q7,d25,d20
# qhasm: h5[0,1] += f46[2] signed* g89_19[2];h5[2,3] += f46[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f46=reg128#12%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h5=q4,<f46=d23,<g89_19=d21
vmlal.s32 q4,d23,d21
# qhasm: h5[0,1] += f57[2] signed* g89_19[0];h5[2,3] += f57[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h5=reg128#5,<f57=reg128#15%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h5=q4,<f57=d29,<g89_19=d20
vmlal.s32 q4,d29,d20
# qhasm: h3[0,1] += f46[0] signed* g89_19[2];h3[2,3] += f46[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h3=q8,<f46=d22,<g89_19=d21
vmlal.s32 q8,d22,d21
# qhasm: h3[0,1] += f57[0] signed* g89_19[0];h3[2,3] += f57[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h3=q8,<f57=d28,<g89_19=d20
vmlal.s32 q8,d28,d20
# qhasm: h6[0,1] += f89[0] signed* g89_19[0];h6[2,3] += f89[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f89=reg128#13%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h6=q5,<f89=d24,<g89_19=d20
vmlal.s32 q5,d24,d20
# qhasm: new h7_stack
# qhasm: ptr = &h7_stack
# asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
# asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
add r2,sp,#576
# qhasm: mem128[ptr] aligned= h7
# asm 1: vst1.8 {<h7=reg128#8%bot-<h7=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h7=d14-<h7=d15},[<ptr=r2,: 128]
vst1.8 {d14-d15},[r2,: 128]
# qhasm: h1[0,1] = f02[0] signed* g13[0];h1[2,3] = f02[1] signed* g13[1]
# asm 1: vmull.s32 >h1=reg128#8,<f02=reg128#10%bot,<g13=reg128#4%bot
# asm 2: vmull.s32 >h1=q7,<f02=d18,<g13=d6
vmull.s32 q7,d18,d6
# qhasm: h1[0,1] += f13[0] signed* g02[0];h1[2,3] += f13[1] signed* g02[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d26,<g02=d0
vmlal.s32 q7,d26,d0
# qhasm: ptr = &mix_stack
# asm 1: lea >ptr=int32#3,<mix_stack=stack128#10
# asm 2: lea >ptr=r2,<mix_stack=[sp,#656]
add r2,sp,#656
# qhasm: mix aligned= mem128[ptr]
# asm 1: vld1.8 {>mix=reg128#16%bot->mix=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>mix=d30->mix=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: h8[0,1] += mix[0] signed* g89_19[2];h8[2,3] += mix[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h8=reg128#3,<mix=reg128#16%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h8=q2,<mix=d30,<g89_19=d21
vmlal.s32 q2,d30,d21
# qhasm: h1[0,1] += f02[2] signed* g89_19[2];h1[2,3] += f02[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f02=reg128#10%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h1=q7,<f02=d19,<g89_19=d21
vmlal.s32 q7,d19,d21
# qhasm: h1[0,1] += f13[2] signed* g89_19[0];h1[2,3] += f13[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f13=reg128#14%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h1=q7,<f13=d27,<g89_19=d20
vmlal.s32 q7,d27,d20
# qhasm: ptr = &g46_19_stack
# asm 1: lea >ptr=int32#3,<g46_19_stack=stack128#8
# asm 2: lea >ptr=r2,<g46_19_stack=[sp,#624]
add r2,sp,#624
# qhasm: g46_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g46_19=reg128#14%bot->g46_19=reg128#14%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g46_19=d26->g46_19=d27},[<ptr=r2,: 128]
vld1.8 {d26-d27},[r2,: 128]
# qhasm: h5[0,1] += f89[2] signed* g46_19[2];h5[2,3] += f89[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h5=q4,<f89=d25,<g46_19=d27
vmlal.s32 q4,d25,d27
# qhasm: h3[0,1] += f57[2] signed* g46_19[2];h3[2,3] += f57[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f57=reg128#15%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h3=q8,<f57=d29,<g46_19=d27
vmlal.s32 q8,d29,d27
# qhasm: h3[0,1] += f89[2] signed* g46_19[0];h3[2,3] += f89[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h3=q8,<f89=d25,<g46_19=d26
vmlal.s32 q8,d25,d26
# qhasm: h1[0,1] += f57[0] signed* g46_19[2];h1[2,3] += f57[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h1=q7,<f57=d28,<g46_19=d27
vmlal.s32 q7,d28,d27
# qhasm: h1[0,1] += f57[2] signed* g46_19[0];h1[2,3] += f57[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f57=reg128#15%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h1=q7,<f57=d29,<g46_19=d26
vmlal.s32 q7,d29,d26
# qhasm: ptr = &g57_19_stack
# asm 1: lea >ptr=int32#3,<g57_19_stack=stack128#7
# asm 2: lea >ptr=r2,<g57_19_stack=[sp,#608]
add r2,sp,#608
# qhasm: g57_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g57_19=reg128#15%bot->g57_19=reg128#15%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g57_19=d28->g57_19=d29},[<ptr=r2,: 128]
vld1.8 {d28-d29},[r2,: 128]
# qhasm: h5[0,1] += f89[0] signed* g57_19[2];h5[2,3] += f89[1] signed* g57_19[3]
# asm 1: vmlal.s32 <h5=reg128#5,<f89=reg128#13%bot,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h5=q4,<f89=d24,<g57_19=d29
vmlal.s32 q4,d24,d29
# qhasm: h3[0,1] += f46[2] signed* g57_19[2];h3[2,3] += f46[3] signed* g57_19[3]
# asm 1: vmlal.s32 <h3=reg128#9,<f46=reg128#12%top,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h3=q8,<f46=d23,<g57_19=d29
vmlal.s32 q8,d23,d29
# qhasm: h3[0,1] += f89[0] signed* g57_19[0];h3[2,3] += f89[1] signed* g57_19[1]
# asm 1: vmlal.s32 <h3=reg128#9,<f89=reg128#13%bot,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h3=q8,<f89=d24,<g57_19=d28
vmlal.s32 q8,d24,d28
# qhasm: h1[0,1] += f46[0] signed* g57_19[2];h1[2,3] += f46[1] signed* g57_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%bot,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h1=q7,<f46=d22,<g57_19=d29
vmlal.s32 q7,d22,d29
# qhasm: h1[0,1] += f46[2] signed* g57_19[0];h1[2,3] += f46[3] signed* g57_19[1]
# asm 1: vmlal.s32 <h1=reg128#8,<f46=reg128#12%top,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h1=q7,<f46=d23,<g57_19=d28
vmlal.s32 q7,d23,d28
# qhasm: new h5_stack
# qhasm: ptr = &h5_stack
# asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
# asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
add r2,sp,#608
# qhasm: mem128[ptr] aligned= h5
# asm 1: vst1.8 {<h5=reg128#5%bot-<h5=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vst1.8 {<h5=d8-<h5=d9},[<ptr=r2,: 128]
vst1.8 {d8-d9},[r2,: 128]
# qhasm: ptr = &g13_19_stack
# asm 1: lea >ptr=int32#3,<g13_19_stack=stack128#4
# asm 2: lea >ptr=r2,<g13_19_stack=[sp,#560]
add r2,sp,#560
# qhasm: g13_19 aligned= mem128[ptr]
# asm 1: vld1.8 {>g13_19=reg128#5%bot->g13_19=reg128#5%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>g13_19=d8->g13_19=d9},[<ptr=r2,: 128]
vld1.8 {d8-d9},[r2,: 128]
# qhasm: h1[0,1] += f89[0] signed* g13_19[2];h1[2,3] += f89[1] signed* g13_19[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%bot,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h1=q7,<f89=d24,<g13_19=d9
vmlal.s32 q7,d24,d9
# qhasm: h1[0,1] += f89[2] signed* mix[2];h1[2,3] += f89[3] signed* mix[3]
# asm 1: vmlal.s32 <h1=reg128#8,<f89=reg128#13%top,<mix=reg128#16%top
# asm 2: vmlal.s32 <h1=q7,<f89=d25,<mix=d31
vmlal.s32 q7,d25,d31
# qhasm: h4[0,1] = f02[0] signed* g46[0];h4[2,3] = f02[1] signed* g46[1]
# asm 1: vmull.s32 >h4=reg128#2,<f02=reg128#10%bot,<g46=reg128#2%bot
# asm 2: vmull.s32 >h4=q1,<f02=d18,<g46=d2
vmull.s32 q1,d18,d2
# qhasm: h4[0,1] += f02[2] signed* g02[2];h4[2,3] += f02[3] signed* g02[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f02=reg128#10%top,<g02=reg128#1%top
# asm 2: vmlal.s32 <h4=q1,<f02=d19,<g02=d1
vmlal.s32 q1,d19,d1
# qhasm: h4[0,1] += f46[0] signed* g02[0];h4[2,3] += f46[1] signed* g02[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%bot,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h4=q1,<f46=d22,<g02=d0
vmlal.s32 q1,d22,d0
# qhasm: h4[0,1] += f89[0] signed* g46_19[2];h4[2,3] += f89[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f89=reg128#13%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h4=q1,<f89=d24,<g46_19=d27
vmlal.s32 q1,d24,d27
# qhasm: h4[0,1] += f46[2] signed* g89_19[0];h4[2,3] += f46[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f46=reg128#12%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h4=q1,<f46=d23,<g89_19=d20
vmlal.s32 q1,d23,d20
# qhasm: h4[0,1] += f13_2[0] signed* g13[2];h4[2,3] += f13_2[1] signed* g13[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h4=q1,<f13_2=d12,<g13=d7
vmlal.s32 q1,d12,d7
# qhasm: h4[0,1] += f13_2[2] signed* g13[0];h4[2,3] += f13_2[3] signed* g13[1]
# asm 1: vmlal.s32 <h4=reg128#2,<f13_2=reg128#7%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h4=q1,<f13_2=d13,<g13=d6
vmlal.s32 q1,d13,d6
# qhasm: h2[0,1] = f02[0] signed* g02[2];h2[2,3] = f02[1] signed* g02[3]
# asm 1: vmull.s32 >h2=reg128#7,<f02=reg128#10%bot,<g02=reg128#1%top
# asm 2: vmull.s32 >h2=q6,<f02=d18,<g02=d1
vmull.s32 q6,d18,d1
# qhasm: h2[0,1] += f02[2] signed* g02[0];h2[2,3] += f02[3] signed* g02[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f02=reg128#10%top,<g02=reg128#1%bot
# asm 2: vmlal.s32 <h2=q6,<f02=d19,<g02=d0
vmlal.s32 q6,d19,d0
# qhasm: h2[0,1] += f46[2] signed* g46_19[2];h2[2,3] += f46[3] signed* g46_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%top,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h2=q6,<f46=d23,<g46_19=d27
vmlal.s32 q6,d23,d27
# qhasm: h2[0,1] += f46[0] signed* g89_19[0];h2[2,3] += f46[1] signed* g89_19[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f46=reg128#12%bot,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h2=q6,<f46=d22,<g89_19=d20
vmlal.s32 q6,d22,d20
# qhasm: h2[0,1] += f89[0] signed* g46_19[0];h2[2,3] += f89[1] signed* g46_19[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f89=reg128#13%bot,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h2=q6,<f89=d24,<g46_19=d26
vmlal.s32 q6,d24,d26
# qhasm: h0[0,1] = f02[0] signed* g02[0];h0[2,3] = f02[1] signed* g02[1]
# asm 1: vmull.s32 >h0=reg128#1,<f02=reg128#10%bot,<g02=reg128#1%bot
# asm 2: vmull.s32 >h0=q0,<f02=d18,<g02=d0
vmull.s32 q0,d18,d0
# qhasm: h0[0,1] += f46[0] signed* g46_19[2];h0[2,3] += f46[1] signed* g46_19[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%bot,<g46_19=reg128#14%top
# asm 2: vmlal.s32 <h0=q0,<f46=d22,<g46_19=d27
vmlal.s32 q0,d22,d27
# qhasm: h0[0,1] += f46[2] signed* g46_19[0];h0[2,3] += f46[3] signed* g46_19[1]
# asm 1: vmlal.s32 <h0=reg128#1,<f46=reg128#12%top,<g46_19=reg128#14%bot
# asm 2: vmlal.s32 <h0=q0,<f46=d23,<g46_19=d26
vmlal.s32 q0,d23,d26
# qhasm: h0[0,1] += f89[0] signed* mix[2];h0[2,3] += f89[1] signed* mix[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f89=reg128#13%bot,<mix=reg128#16%top
# asm 2: vmlal.s32 <h0=q0,<f89=d24,<mix=d31
vmlal.s32 q0,d24,d31
# qhasm: h0[0,1] += f02[2] signed* g89_19[0];h0[2,3] += f02[3] signed* g89_19[1]
# asm 1: vmlal.s32 <h0=reg128#1,<f02=reg128#10%top,<g89_19=reg128#11%bot
# asm 2: vmlal.s32 <h0=q0,<f02=d19,<g89_19=d20
vmlal.s32 q0,d19,d20
# qhasm: ptr = &f57_2_stack
# asm 1: lea >ptr=int32#3,<f57_2_stack=stack128#9
# asm 2: lea >ptr=r2,<f57_2_stack=[sp,#640]
add r2,sp,#640
# qhasm: f57_2 aligned= mem128[ptr]
# asm 1: vld1.8 {>f57_2=reg128#10%bot->f57_2=reg128#10%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>f57_2=d18->f57_2=d19},[<ptr=r2,: 128]
vld1.8 {d18-d19},[r2,: 128]
# qhasm: h8[0,1] += f57_2[0] signed* g13[2];h8[2,3] += f57_2[1] signed* g13[3]
# asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%bot,<g13=reg128#4%top
# asm 2: vmlal.s32 <h8=q2,<f57_2=d18,<g13=d7
vmlal.s32 q2,d18,d7
# qhasm: h8[0,1] += f57_2[2] signed* g13[0];h8[2,3] += f57_2[3] signed* g13[1]
# asm 1: vmlal.s32 <h8=reg128#3,<f57_2=reg128#10%top,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h8=q2,<f57_2=d19,<g13=d6
vmlal.s32 q2,d19,d6
# qhasm: h6[0,1] += f57_2[0] signed* g13[0];h6[2,3] += f57_2[1] signed* g13[1]
# asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h6=q5,<f57_2=d18,<g13=d6
vmlal.s32 q5,d18,d6
# qhasm: h6[0,1] += f57_2[2] signed* g89_19[2];h6[2,3] += f57_2[3] signed* g89_19[3]
# asm 1: vmlal.s32 <h6=reg128#6,<f57_2=reg128#10%top,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h6=q5,<f57_2=d19,<g89_19=d21
vmlal.s32 q5,d19,d21
# qhasm: h4[0,1] += f57_2[0] signed* g89_19[2];h4[2,3] += f57_2[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h4=q1,<f57_2=d18,<g89_19=d21
vmlal.s32 q1,d18,d21
# qhasm: h4[0,1] += f57_2[2] signed* g57_19[2];h4[2,3] += f57_2[3] signed* g57_19[3]
# asm 1: vmlal.s32 <h4=reg128#2,<f57_2=reg128#10%top,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h4=q1,<f57_2=d19,<g57_19=d29
vmlal.s32 q1,d19,d29
# qhasm: h0[0,1] += f57_2[0] signed* g57_19[0];h0[2,3] += f57_2[1] signed* g57_19[1]
# asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%bot,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h0=q0,<f57_2=d18,<g57_19=d28
vmlal.s32 q0,d18,d28
# qhasm: h0[0,1] += f57_2[2] signed* g13_19[2];h0[2,3] += f57_2[3] signed* g13_19[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f57_2=reg128#10%top,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h0=q0,<f57_2=d19,<g13_19=d9
vmlal.s32 q0,d19,d9
# qhasm: h2[0,1] += f57_2[0] signed* g57_19[2];h2[2,3] += f57_2[1] signed* g57_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%bot,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h2=q6,<f57_2=d18,<g57_19=d29
vmlal.s32 q6,d18,d29
# qhasm: h2[0,1] += f57_2[2] signed* g57_19[0];h2[2,3] += f57_2[3] signed* g57_19[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f57_2=reg128#10%top,<g57_19=reg128#15%bot
# asm 2: vmlal.s32 <h2=q6,<f57_2=d19,<g57_19=d28
vmlal.s32 q6,d19,d28
# qhasm: ptr = &f13_2_stack
# asm 1: lea >ptr=int32#3,<f13_2_stack=stack128#6
# asm 2: lea >ptr=r2,<f13_2_stack=[sp,#592]
add r2,sp,#592
# qhasm: f13_2 aligned= mem128[ptr]
# asm 1: vld1.8 {>f13_2=reg128#10%bot->f13_2=reg128#10%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>f13_2=d18->f13_2=d19},[<ptr=r2,: 128]
vld1.8 {d18-d19},[r2,: 128]
# qhasm: ptr = &_0x2000000_stack
# asm 1: lea >ptr=int32#3,<_0x2000000_stack=stack128#1
# asm 2: lea >ptr=r2,<_0x2000000_stack=[sp,#512]
add r2,sp,#512
# qhasm: _0x2000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x2000000=reg128#12%bot->_0x2000000=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x2000000=d22->_0x2000000=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: h6[0,1] += f13_2[2] signed* g13[2];h6[2,3] += f13_2[3] signed* g13[3]
# asm 1: vmlal.s32 <h6=reg128#6,<f13_2=reg128#10%top,<g13=reg128#4%top
# asm 2: vmlal.s32 <h6=q5,<f13_2=d19,<g13=d7
vmlal.s32 q5,d19,d7
# qhasm: h0[0,1] += f13_2[0] signed* g89_19[2];h0[2,3] += f13_2[1] signed* g89_19[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%bot,<g89_19=reg128#11%top
# asm 2: vmlal.s32 <h0=q0,<f13_2=d18,<g89_19=d21
vmlal.s32 q0,d18,d21
# qhasm: h0[0,1] += f13_2[2] signed* g57_19[2];h0[2,3] += f13_2[3] signed* g57_19[3]
# asm 1: vmlal.s32 <h0=reg128#1,<f13_2=reg128#10%top,<g57_19=reg128#15%top
# asm 2: vmlal.s32 <h0=q0,<f13_2=d19,<g57_19=d29
vmlal.s32 q0,d19,d29
# qhasm: h2[0,1] += f13_2[0] signed* g13[0];h2[2,3] += f13_2[1] signed* g13[1]
# asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%bot,<g13=reg128#4%bot
# asm 2: vmlal.s32 <h2=q6,<f13_2=d18,<g13=d6
vmlal.s32 q6,d18,d6
# qhasm: ptr = &_0x1000000_stack
# asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
# asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
add r2,sp,#528
# qhasm: _0x1000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x1000000=reg128#4%bot->_0x1000000=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>_0x1000000=d6->_0x1000000=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
  6009. # qhasm: h2[0,1] += f13_2[2] signed* g89_19[2];h2[2,3] += f13_2[3] signed* g89_19[3]
  6010. # asm 1: vmlal.s32 <h2=reg128#7,<f13_2=reg128#10%top,<g89_19=reg128#11%top
  6011. # asm 2: vmlal.s32 <h2=q6,<f13_2=d19,<g89_19=d21
  6012. vmlal.s32 q6,d19,d21
  6013. # qhasm: ptr = &h7_stack
  6014. # asm 1: lea >ptr=int32#3,<h7_stack=stack128#5
  6015. # asm 2: lea >ptr=r2,<h7_stack=[sp,#576]
  6016. add r2,sp,#576
  6017. # qhasm: h7 aligned= mem128[ptr]
  6018. # asm 1: vld1.8 {>h7=reg128#10%bot->h7=reg128#10%top},[<ptr=int32#3,: 128]
  6019. # asm 2: vld1.8 {>h7=d18->h7=d19},[<ptr=r2,: 128]
  6020. vld1.8 {d18-d19},[r2,: 128]
  6021. # qhasm: h0[0,1] += mix[0] signed* g13_19[0];h0[2,3] += mix[1] signed* g13_19[1]
  6022. # asm 1: vmlal.s32 <h0=reg128#1,<mix=reg128#16%bot,<g13_19=reg128#5%bot
  6023. # asm 2: vmlal.s32 <h0=q0,<mix=d30,<g13_19=d8
  6024. vmlal.s32 q0,d30,d8
  6025. # qhasm: ptr = &h9_stack
  6026. # asm 1: lea >ptr=int32#3,<h9_stack=stack128#11
  6027. # asm 2: lea >ptr=r2,<h9_stack=[sp,#672]
  6028. add r2,sp,#672
  6029. # qhasm: h9 aligned= mem128[ptr]
  6030. # asm 1: vld1.8 {>h9=reg128#11%bot->h9=reg128#11%top},[<ptr=int32#3,: 128]
  6031. # asm 2: vld1.8 {>h9=d20->h9=d21},[<ptr=r2,: 128]
  6032. vld1.8 {d20-d21},[r2,: 128]
  6033. # qhasm: h6[0,1] += mix[0] signed* g57_19[2];h6[2,3] += mix[1] signed* g57_19[3]
  6034. # asm 1: vmlal.s32 <h6=reg128#6,<mix=reg128#16%bot,<g57_19=reg128#15%top
  6035. # asm 2: vmlal.s32 <h6=q5,<mix=d30,<g57_19=d29
  6036. vmlal.s32 q5,d30,d29
  6037. # qhasm: ptr = &h5_stack
  6038. # asm 1: lea >ptr=int32#3,<h5_stack=stack128#7
  6039. # asm 2: lea >ptr=r2,<h5_stack=[sp,#608]
  6040. add r2,sp,#608
  6041. # qhasm: h5 aligned= mem128[ptr]
  6042. # asm 1: vld1.8 {>h5=reg128#13%bot->h5=reg128#13%top},[<ptr=int32#3,: 128]
  6043. # asm 2: vld1.8 {>h5=d24->h5=d25},[<ptr=r2,: 128]
  6044. vld1.8 {d24-d25},[r2,: 128]
  6045. # qhasm: h4[0,1] += mix[0] signed* g57_19[0];h4[2,3] += mix[1] signed* g57_19[1]
  6046. # asm 1: vmlal.s32 <h4=reg128#2,<mix=reg128#16%bot,<g57_19=reg128#15%bot
  6047. # asm 2: vmlal.s32 <h4=q1,<mix=d30,<g57_19=d28
  6048. vmlal.s32 q1,d30,d28
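# note: the interleaved vadd/vshr/vshl/vsub chain that follows is carry
# propagation, squeezing the 64-bit lane sums h0..h9 back into the
# alternating 26/25-bit limbs (radix ~2^25.5) used here for arithmetic
# mod 2^255-19: even limbs are biased by _0x2000000 = 2^25 and shifted
# right by 26, odd limbs by _0x1000000 = 2^24 and shifted by 25, with
# each carry added into the next-higher limb.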
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#14,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q13,<h0=q0,<_0x2000000=q11
vadd.i64 q13,q0,q11
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#15,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q14,<h6=q5,<_0x2000000=q11
vadd.i64 q14,q5,q11
# qhasm: h2[0,1] += mix[0] signed* g13_19[2];h2[2,3] += mix[1] signed* g13_19[3]
# asm 1: vmlal.s32 <h2=reg128#7,<mix=reg128#16%bot,<g13_19=reg128#5%top
# asm 2: vmlal.s32 <h2=q6,<mix=d30,<g13_19=d9
vmlal.s32 q6,d30,d9
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#5,<t0=reg128#14,#26
# asm 2: vshr.s64 >c0=q4,<t0=q13,#26
vshr.s64 q4,q13,#26
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#14,<t6=reg128#15,#26
# asm 2: vshr.s64 >c6=q13,<t6=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#8,<h1=reg128#8,<c0=reg128#5
# asm 2: vadd.i64 >h1=q7,<h1=q7,<c0=q4
vadd.i64 q7,q7,q4
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#5,#26
# asm 2: vshl.i64 >t0=q4,<c0=q4,#26
vshl.i64 q4,q4,#26
# qhasm: 2x t1 = h1 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#15,<h1=reg128#8,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t1=q14,<h1=q7,<_0x1000000=q3
vadd.i64 q14,q7,q3
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#10,<h7=reg128#10,<c6=reg128#14
# asm 2: vadd.i64 >h7=q9,<h7=q9,<c6=q13
vadd.i64 q9,q9,q13
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#14,<c6=reg128#14,#26
# asm 2: vshl.i64 >t6=q13,<c6=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t7 = h7 + _0x1000000
# asm 1: vadd.i64 >t7=reg128#16,<h7=reg128#10,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t7=q15,<h7=q9,<_0x1000000=q3
vadd.i64 q15,q9,q3
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: 2x c1 = t1 signed>> 25
# asm 1: vshr.s64 >c1=reg128#5,<t1=reg128#15,#25
# asm 2: vshr.s64 >c1=q4,<t1=q14,#25
vshr.s64 q4,q14,#25
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#14
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q13
vsub.i64 q5,q5,q13
# qhasm: 2x c7 = t7 signed>> 25
# asm 1: vshr.s64 >c7=reg128#14,<t7=reg128#16,#25
# asm 2: vshr.s64 >c7=q13,<t7=q15,#25
vshr.s64 q13,q15,#25
# qhasm: 2x h2 += c1
# asm 1: vadd.i64 >h2=reg128#7,<h2=reg128#7,<c1=reg128#5
# asm 2: vadd.i64 >h2=q6,<h2=q6,<c1=q4
vadd.i64 q6,q6,q4
# qhasm: 2x t1 = c1 << 25
# asm 1: vshl.i64 >t1=reg128#5,<c1=reg128#5,#25
# asm 2: vshl.i64 >t1=q4,<c1=q4,#25
vshl.i64 q4,q4,#25
# qhasm: 2x t2 = h2 + _0x2000000
# asm 1: vadd.i64 >t2=reg128#15,<h2=reg128#7,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t2=q14,<h2=q6,<_0x2000000=q11
vadd.i64 q14,q6,q11
# qhasm: 2x h8 += c7
# asm 1: vadd.i64 >h8=reg128#3,<h8=reg128#3,<c7=reg128#14
# asm 2: vadd.i64 >h8=q2,<h8=q2,<c7=q13
vadd.i64 q2,q2,q13
# qhasm: 2x h1 -= t1
# asm 1: vsub.i64 >h1=reg128#5,<h1=reg128#8,<t1=reg128#5
# asm 2: vsub.i64 >h1=q4,<h1=q7,<t1=q4
vsub.i64 q4,q7,q4
# qhasm: 2x c2 = t2 signed>> 26
# asm 1: vshr.s64 >c2=reg128#8,<t2=reg128#15,#26
# asm 2: vshr.s64 >c2=q7,<t2=q14,#26
vshr.s64 q7,q14,#26
# qhasm: 2x t7 = c7 << 25
# asm 1: vshl.i64 >t7=reg128#14,<c7=reg128#14,#25
# asm 2: vshl.i64 >t7=q13,<c7=q13,#25
vshl.i64 q13,q13,#25
# qhasm: 2x t8 = h8 + _0x2000000
# asm 1: vadd.i64 >t8=reg128#15,<h8=reg128#3,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t8=q14,<h8=q2,<_0x2000000=q11
vadd.i64 q14,q2,q11
# qhasm: 2x h3 += c2
# asm 1: vadd.i64 >h3=reg128#9,<h3=reg128#9,<c2=reg128#8
# asm 2: vadd.i64 >h3=q8,<h3=q8,<c2=q7
vadd.i64 q8,q8,q7
# qhasm: 2x t2 = c2 << 26
# asm 1: vshl.i64 >t2=reg128#8,<c2=reg128#8,#26
# asm 2: vshl.i64 >t2=q7,<c2=q7,#26
vshl.i64 q7,q7,#26
# qhasm: 2x t3 = h3 + _0x1000000
# asm 1: vadd.i64 >t3=reg128#16,<h3=reg128#9,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t3=q15,<h3=q8,<_0x1000000=q3
vadd.i64 q15,q8,q3
# qhasm: 2x h7 -= t7
# asm 1: vsub.i64 >h7=reg128#10,<h7=reg128#10,<t7=reg128#14
# asm 2: vsub.i64 >h7=q9,<h7=q9,<t7=q13
vsub.i64 q9,q9,q13
# qhasm: 2x c8 = t8 signed>> 26
# asm 1: vshr.s64 >c8=reg128#14,<t8=reg128#15,#26
# asm 2: vshr.s64 >c8=q13,<t8=q14,#26
vshr.s64 q13,q14,#26
# qhasm: 2x h2 -= t2
# asm 1: vsub.i64 >h2=reg128#7,<h2=reg128#7,<t2=reg128#8
# asm 2: vsub.i64 >h2=q6,<h2=q6,<t2=q7
vsub.i64 q6,q6,q7
# qhasm: 2x c3 = t3 signed>> 25
# asm 1: vshr.s64 >c3=reg128#8,<t3=reg128#16,#25
# asm 2: vshr.s64 >c3=q7,<t3=q15,#25
vshr.s64 q7,q15,#25
# qhasm: 2x h9 += c8
# asm 1: vadd.i64 >h9=reg128#11,<h9=reg128#11,<c8=reg128#14
# asm 2: vadd.i64 >h9=q10,<h9=q10,<c8=q13
vadd.i64 q10,q10,q13
# qhasm: 2x t8 = c8 << 26
# asm 1: vshl.i64 >t8=reg128#14,<c8=reg128#14,#26
# asm 2: vshl.i64 >t8=q13,<c8=q13,#26
vshl.i64 q13,q13,#26
# qhasm: 2x t9 = h9 + _0x1000000
# asm 1: vadd.i64 >t9=reg128#15,<h9=reg128#11,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t9=q14,<h9=q10,<_0x1000000=q3
vadd.i64 q14,q10,q3
# qhasm: 2x h4 += c3
# asm 1: vadd.i64 >h4=reg128#2,<h4=reg128#2,<c3=reg128#8
# asm 2: vadd.i64 >h4=q1,<h4=q1,<c3=q7
vadd.i64 q1,q1,q7
# qhasm: posh = playground1_ptr + 240
# asm 1: add >posh=int32#3,<playground1_ptr=int32#4,#240
# asm 2: add >posh=r2,<playground1_ptr=r3,#240
add r2,r3,#240
# qhasm: 2x t3 = c3 << 25
# asm 1: vshl.i64 >t3=reg128#8,<c3=reg128#8,#25
# asm 2: vshl.i64 >t3=q7,<c3=q7,#25
vshl.i64 q7,q7,#25
# qhasm: posH = playground1_ptr + 144
# asm 1: add >posH=int32#5,<playground1_ptr=int32#4,#144
# asm 2: add >posH=r4,<playground1_ptr=r3,#144
add r4,r3,#144
# qhasm: 2x t4 = h4 + _0x2000000
# asm 1: vadd.i64 >t4=reg128#16,<h4=reg128#2,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t4=q15,<h4=q1,<_0x2000000=q11
vadd.i64 q15,q1,q11
# qhasm: posh+=8
# asm 1: add >posh=int32#3,<posh=int32#3,#8
# asm 2: add >posh=r2,<posh=r2,#8
add r2,r2,#8
# qhasm: 2x h8 -= t8
# asm 1: vsub.i64 >h8=reg128#3,<h8=reg128#3,<t8=reg128#14
# asm 2: vsub.i64 >h8=q2,<h8=q2,<t8=q13
vsub.i64 q2,q2,q13
# qhasm: posH+=8
# asm 1: add >posH=int32#5,<posH=int32#5,#8
# asm 2: add >posH=r4,<posH=r4,#8
add r4,r4,#8
# qhasm: 2x c9 = t9 signed>> 25
# asm 1: vshr.s64 >c9=reg128#14,<t9=reg128#15,#25
# asm 2: vshr.s64 >c9=q13,<t9=q14,#25
vshr.s64 q13,q14,#25
# qhasm: 2x h3 -= t3
# asm 1: vsub.i64 >h3=reg128#8,<h3=reg128#9,<t3=reg128#8
# asm 2: vsub.i64 >h3=q7,<h3=q8,<t3=q7
vsub.i64 q7,q8,q7
# qhasm: 2x c4 = t4 signed>> 26
# asm 1: vshr.s64 >c4=reg128#9,<t4=reg128#16,#26
# asm 2: vshr.s64 >c4=q8,<t4=q15,#26
vshr.s64 q8,q15,#26
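# note: the top carry c9 counts multiples of 2^255, and
# 2^255 = 19 (mod 2^255-19), so it is folded into h0 as
# 19*c9 = 2*c9 + 16*c9 + c9 via s = c9 + c9, s = c9 << 4 and a
# final plain addition of c9 below.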
# qhasm: 2x s = c9 + c9
# asm 1: vadd.i64 >s=reg128#15,<c9=reg128#14,<c9=reg128#14
# asm 2: vadd.i64 >s=q14,<c9=q13,<c9=q13
vadd.i64 q14,q13,q13
# qhasm: 2x h5 += c4
# asm 1: vadd.i64 >h5=reg128#13,<h5=reg128#13,<c4=reg128#9
# asm 2: vadd.i64 >h5=q12,<h5=q12,<c4=q8
vadd.i64 q12,q12,q8
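# note: each q register holds the same limb of two field elements, one
# per 64-bit half; the vtrn.32/vst1.8 pairs that follow de-interleave
# them, evidently writing one element out through posh and the other
# through posH.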
# qhasm: h2 h3 = h2[0]h3[0]h2[2]h2[3] h2[1]h3[1]h3[2]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%bot,<h3=reg128#8%bot
# asm 2: vtrn.32 <h2=d12,<h3=d14
vtrn.32 d12,d14
# qhasm: 2x t4 = c4 << 26
# asm 1: vshl.i64 >t4=reg128#9,<c4=reg128#9,#26
# asm 2: vshl.i64 >t4=q8,<c4=q8,#26
vshl.i64 q8,q8,#26
# qhasm: h2 h3 = h2[0]h2[1]h2[2]h3[2] h3[0]h3[1]h2[3]h3[3]
# asm 1: vtrn.32 <h2=reg128#7%top,<h3=reg128#8%top
# asm 2: vtrn.32 <h2=d13,<h3=d15
vtrn.32 d13,d15
# qhasm: 2x t5 = h5 + _0x1000000
# asm 1: vadd.i64 >t5=reg128#4,<h5=reg128#13,<_0x1000000=reg128#4
# asm 2: vadd.i64 >t5=q3,<h5=q12,<_0x1000000=q3
vadd.i64 q3,q12,q3
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#15
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q14
vadd.i64 q0,q0,q14
# qhasm: mem64[posh] aligned= h2[0];posh+=8
# asm 1: vst1.8 <h2=reg128#7%bot,[<posh=int32#3,: 64]!
# asm 2: vst1.8 <h2=d12,[<posh=r2,: 64]!
vst1.8 d12,[r2,: 64]!
# qhasm: 2x s = c9 << 4
# asm 1: vshl.i64 >s=reg128#8,<c9=reg128#14,#4
# asm 2: vshl.i64 >s=q7,<c9=q13,#4
vshl.i64 q7,q13,#4
# qhasm: mem64[posH] aligned= h2[1];posH+=8
# asm 1: vst1.8 <h2=reg128#7%top,[<posH=int32#5,: 64]!
# asm 2: vst1.8 <h2=d13,[<posH=r4,: 64]!
vst1.8 d13,[r4,: 64]!
# qhasm: 2x h4 -= t4
# asm 1: vsub.i64 >h4=reg128#2,<h4=reg128#2,<t4=reg128#9
# asm 2: vsub.i64 >h4=q1,<h4=q1,<t4=q8
vsub.i64 q1,q1,q8
# qhasm: 2x c5 = t5 signed>> 25
# asm 1: vshr.s64 >c5=reg128#4,<t5=reg128#4,#25
# asm 2: vshr.s64 >c5=q3,<t5=q3,#25
vshr.s64 q3,q3,#25
# qhasm: 2x h0 += s
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<s=reg128#8
# asm 2: vadd.i64 >h0=q0,<h0=q0,<s=q7
vadd.i64 q0,q0,q7
# qhasm: 2x h6 += c5
# asm 1: vadd.i64 >h6=reg128#6,<h6=reg128#6,<c5=reg128#4
# asm 2: vadd.i64 >h6=q5,<h6=q5,<c5=q3
vadd.i64 q5,q5,q3
# qhasm: 2x t5 = c5 << 25
# asm 1: vshl.i64 >t5=reg128#4,<c5=reg128#4,#25
# asm 2: vshl.i64 >t5=q3,<c5=q3,#25
vshl.i64 q3,q3,#25
# qhasm: 2x t6 = h6 + _0x2000000
# asm 1: vadd.i64 >t6=reg128#7,<h6=reg128#6,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t6=q6,<h6=q5,<_0x2000000=q11
vadd.i64 q6,q5,q11
# qhasm: 2x h0 += c9
# asm 1: vadd.i64 >h0=reg128#1,<h0=reg128#1,<c9=reg128#14
# asm 2: vadd.i64 >h0=q0,<h0=q0,<c9=q13
vadd.i64 q0,q0,q13
# qhasm: 2x t9 = c9 << 25
# asm 1: vshl.i64 >t9=reg128#8,<c9=reg128#14,#25
# asm 2: vshl.i64 >t9=q7,<c9=q13,#25
vshl.i64 q7,q13,#25
# qhasm: 2x t0 = h0 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#9,<h0=reg128#1,<_0x2000000=reg128#12
# asm 2: vadd.i64 >t0=q8,<h0=q0,<_0x2000000=q11
vadd.i64 q8,q0,q11
# qhasm: 2x h5 -= t5
# asm 1: vsub.i64 >h5=reg128#4,<h5=reg128#13,<t5=reg128#4
# asm 2: vsub.i64 >h5=q3,<h5=q12,<t5=q3
vsub.i64 q3,q12,q3
# qhasm: 2x c6 = t6 signed>> 26
# asm 1: vshr.s64 >c6=reg128#7,<t6=reg128#7,#26
# asm 2: vshr.s64 >c6=q6,<t6=q6,#26
vshr.s64 q6,q6,#26
# qhasm: 2x h9 -= t9
# asm 1: vsub.i64 >h9=reg128#8,<h9=reg128#11,<t9=reg128#8
# asm 2: vsub.i64 >h9=q7,<h9=q10,<t9=q7
vsub.i64 q7,q10,q7
# qhasm: h4 h5 = h4[0]h5[0]h4[2]h4[3] h4[1]h5[1]h5[2]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%bot,<h5=reg128#4%bot
# asm 2: vtrn.32 <h4=d2,<h5=d6
vtrn.32 d2,d6
# qhasm: 2x c0 = t0 signed>> 26
# asm 1: vshr.s64 >c0=reg128#9,<t0=reg128#9,#26
# asm 2: vshr.s64 >c0=q8,<t0=q8,#26
vshr.s64 q8,q8,#26
# qhasm: h4 h5 = h4[0]h4[1]h4[2]h5[2] h5[0]h5[1]h4[3]h5[3]
# asm 1: vtrn.32 <h4=reg128#2%top,<h5=reg128#4%top
# asm 2: vtrn.32 <h4=d3,<h5=d7
vtrn.32 d3,d7
# qhasm: 2x h7 += c6
# asm 1: vadd.i64 >h7=reg128#4,<h7=reg128#10,<c6=reg128#7
# asm 2: vadd.i64 >h7=q3,<h7=q9,<c6=q6
vadd.i64 q3,q9,q6
# qhasm: mem64[posh] aligned= h4[0]
# asm 1: vst1.8 <h4=reg128#2%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h4=d2,[<posh=r2,: 64]
vst1.8 d2,[r2,: 64]
# qhasm: 2x t6 = c6 << 26
# asm 1: vshl.i64 >t6=reg128#7,<c6=reg128#7,#26
# asm 2: vshl.i64 >t6=q6,<c6=q6,#26
vshl.i64 q6,q6,#26
# qhasm: mem64[posH] aligned= h4[1]
# asm 1: vst1.8 <h4=reg128#2%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h4=d3,[<posH=r4,: 64]
vst1.8 d3,[r4,: 64]
# qhasm: 2x h1 += c0
# asm 1: vadd.i64 >h1=reg128#2,<h1=reg128#5,<c0=reg128#9
# asm 2: vadd.i64 >h1=q1,<h1=q4,<c0=q8
vadd.i64 q1,q4,q8
# qhasm: h8 h9 = h8[0]h9[0]h8[2]h8[3] h8[1]h9[1]h9[2]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%bot,<h9=reg128#8%bot
# asm 2: vtrn.32 <h8=d4,<h9=d14
vtrn.32 d4,d14
# qhasm: 2x t0 = c0 << 26
# asm 1: vshl.i64 >t0=reg128#5,<c0=reg128#9,#26
# asm 2: vshl.i64 >t0=q4,<c0=q8,#26
vshl.i64 q4,q8,#26
# qhasm: h8 h9 = h8[0]h8[1]h8[2]h9[2] h9[0]h9[1]h8[3]h9[3]
# asm 1: vtrn.32 <h8=reg128#3%top,<h9=reg128#8%top
# asm 2: vtrn.32 <h8=d5,<h9=d15
vtrn.32 d5,d15
# qhasm: 2x h6 -= t6
# asm 1: vsub.i64 >h6=reg128#6,<h6=reg128#6,<t6=reg128#7
# asm 2: vsub.i64 >h6=q5,<h6=q5,<t6=q6
vsub.i64 q5,q5,q6
# qhasm: posh+=16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: 2x h0 -= t0
# asm 1: vsub.i64 >h0=reg128#1,<h0=reg128#1,<t0=reg128#5
# asm 2: vsub.i64 >h0=q0,<h0=q0,<t0=q4
vsub.i64 q0,q0,q4
# qhasm: mem64[posh] aligned= h8[0]
# asm 1: vst1.8 <h8=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h8=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: posH+=16
# asm 1: add >posH=int32#5,<posH=int32#5,#16
# asm 2: add >posH=r4,<posH=r4,#16
add r4,r4,#16
# qhasm: mem64[posH] aligned= h8[1]
# asm 1: vst1.8 <h8=reg128#3%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h8=d5,[<posH=r4,: 64]
vst1.8 d5,[r4,: 64]
# qhasm: h6 h7 = h6[0]h7[0]h6[2]h6[3] h6[1]h7[1]h7[2]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%bot,<h7=reg128#4%bot
# asm 2: vtrn.32 <h6=d10,<h7=d6
vtrn.32 d10,d6
# qhasm: h6 h7 = h6[0]h6[1]h6[2]h7[2] h7[0]h7[1]h6[3]h7[3]
# asm 1: vtrn.32 <h6=reg128#6%top,<h7=reg128#4%top
# asm 2: vtrn.32 <h6=d11,<h7=d7
vtrn.32 d11,d7
# qhasm: posh-=8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: posH-=8
# asm 1: sub >posH=int32#5,<posH=int32#5,#8
# asm 2: sub >posH=r4,<posH=r4,#8
sub r4,r4,#8
# qhasm: h0 h1 = h0[0]h1[0]h0[2]h0[3] h0[1]h1[1]h1[2]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%bot,<h1=reg128#2%bot
# asm 2: vtrn.32 <h0=d0,<h1=d2
vtrn.32 d0,d2
# qhasm: h0 h1 = h0[0]h0[1]h0[2]h1[2] h1[0]h1[1]h0[3]h1[3]
# asm 1: vtrn.32 <h0=reg128#1%top,<h1=reg128#2%top
# asm 2: vtrn.32 <h0=d1,<h1=d3
vtrn.32 d1,d3
# qhasm: mem64[posh] aligned= h6[0]
# asm 1: vst1.8 <h6=reg128#6%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h6=d10,[<posh=r2,: 64]
vst1.8 d10,[r2,: 64]
# qhasm: mem64[posH] aligned= h6[1]
# asm 1: vst1.8 <h6=reg128#6%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h6=d11,[<posH=r4,: 64]
vst1.8 d11,[r4,: 64]
# qhasm: posh-=24
# asm 1: sub >posh=int32#3,<posh=int32#3,#24
# asm 2: sub >posh=r2,<posh=r2,#24
sub r2,r2,#24
# qhasm: posH-=24
# asm 1: sub >posH=int32#5,<posH=int32#5,#24
# asm 2: sub >posH=r4,<posH=r4,#24
sub r4,r4,#24
# qhasm: mem64[posh] aligned= h0[0]
# asm 1: vst1.8 <h0=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h0=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: mem64[posH] aligned= h0[1]
# asm 1: vst1.8 <h0=reg128#1%top,[<posH=int32#5,: 64]
# asm 2: vst1.8 <h0=d1,[<posH=r4,: 64]
vst1.8 d1,[r4,: 64]
# qhasm: pos = pos_stack
# asm 1: ldr >pos=int32#3,<pos_stack=stack32#3
# asm 2: ldr >pos=r2,<pos_stack=[sp,#488]
ldr r2,[sp,#488]
# qhasm: swap = swap_stack
# asm 1: ldr >swap=int32#5,<swap_stack=stack32#4
# asm 2: ldr >swap=r4,<swap_stack=[sp,#492]
ldr r4,[sp,#492]
# qhasm: signed<? pos -= 1
# asm 1: subs >pos=int32#6,<pos=int32#3,#1
# asm 2: subs >pos=r5,<pos=r2,#1
subs r5,r2,#1
# qhasm: goto mainloop if !signed<
bge ._mainloop
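# note: pos counts down over the scalar bits; once it goes negative the
# ladder main loop above is finished, and what follows sets up the
# field inversion (presumably of the Z coordinate) needed to produce
# the affine result.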
# qhasm: posy = playground1_ptr + 144
# asm 1: add >posy=int32#2,<playground1_ptr=int32#4,#144
# asm 2: add >posy=r1,<playground1_ptr=r3,#144
add r1,r3,#144
# qhasm: posx = playground1_ptr + 336
# asm 1: add >posx=int32#3,<playground1_ptr=int32#4,#336
# asm 2: add >posx=r2,<playground1_ptr=r3,#336
add r2,r3,#336
# qhasm: f0 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<posy=int32#2,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<posy=r1,: 128]!
vld1.8 {d0-d1},[r1,: 128]!
# qhasm: f4 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<posy=int32#2,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<posy=r1,: 128]!
vld1.8 {d2-d3},[r1,: 128]!
# qhasm: new f8
# qhasm: f8 aligned= mem64[posy] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<posy=int32#2,: 64]
# asm 2: vld1.8 {<f8=d4},[<posy=r1,: 64]
vld1.8 {d4},[r1,: 64]
# qhasm: mem128[posx] aligned= f0;posx += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<posx=r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
# qhasm: mem128[posx] aligned= f4;posx += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<posx=int32#3,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<posx=r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
# qhasm: mem64[posx] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<posx=int32#3,: 64]
# asm 2: vst1.8 <f8=d4,[<posx=r2,: 64]
vst1.8 d4,[r2,: 64]
# qhasm: i = 0
# asm 1: ldr >i=int32#2,=0
# asm 2: ldr >i=r1,=0
ldr r1,=0
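# note: invertloop appears to implement Fermat inversion, raising the
# value just copied to the power p - 2 = 2^255 - 21 mod p = 2^255 - 19
# by a fixed schedule: each pass i = 0..11 selects a multiply source,
# an optional post-copy destination and a squaring count j.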
# qhasm: invertloop:
._invertloop:
# qhasm: mulsource = playground1_ptr + 144
# asm 1: add >mulsource=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >mulsource=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: postcopy = 0
# asm 1: ldr >postcopy=int32#5,=0
# asm 2: ldr >postcopy=r4,=0
ldr r4,=0
# qhasm: j = 2
# asm 1: ldr >j=int32#6,=2
# asm 2: ldr >j=r5,=2
ldr r5,=2
# qhasm: =? i - 1
# asm 1: cmp <i=int32#2,#1
# asm 2: cmp <i=r1,#1
cmp r1,#1
# qhasm: j = 1 if =
# asm 1: ldreq <j=int32#6,=1
# asm 2: ldreq <j=r5,=1
ldreq r5,=1
# qhasm: mulsource = playground1_ptr + 336 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#336
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#336
addeq r2,r3,#336
# qhasm: postcopy = playground1_ptr + 48 if =
# asm 1: addeq <postcopy=int32#5,<playground1_ptr=int32#4,#48
# asm 2: addeq <postcopy=r4,<playground1_ptr=r3,#48
addeq r4,r3,#48
# qhasm: =? i - 2
# asm 1: cmp <i=int32#2,#2
# asm 2: cmp <i=r1,#2
cmp r1,#2
# qhasm: j = 1 if =
# asm 1: ldreq <j=int32#6,=1
# asm 2: ldreq <j=r5,=1
ldreq r5,=1
# qhasm: mulsource = playground1_ptr + 48 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#48
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#48
addeq r2,r3,#48
# qhasm: =? i - 3
# asm 1: cmp <i=int32#2,#3
# asm 2: cmp <i=r1,#3
cmp r1,#3
# qhasm: j = 5 if =
# asm 1: ldreq <j=int32#6,=5
# asm 2: ldreq <j=r5,=5
ldreq r5,=5
# qhasm: postcopy = playground1_ptr + 336 if =
# asm 1: addeq <postcopy=int32#5,<playground1_ptr=int32#4,#336
# asm 2: addeq <postcopy=r4,<playground1_ptr=r3,#336
addeq r4,r3,#336
# qhasm: =? i - 4
# asm 1: cmp <i=int32#2,#4
# asm 2: cmp <i=r1,#4
cmp r1,#4
# qhasm: j = 10 if =
# asm 1: ldreq <j=int32#6,=10
# asm 2: ldreq <j=r5,=10
ldreq r5,=10
# qhasm: =? i - 5
# asm 1: cmp <i=int32#2,#5
# asm 2: cmp <i=r1,#5
cmp r1,#5
# qhasm: j = 20 if =
# asm 1: ldreq <j=int32#6,=20
# asm 2: ldreq <j=r5,=20
ldreq r5,=20
# qhasm: =? i - 6
# asm 1: cmp <i=int32#2,#6
# asm 2: cmp <i=r1,#6
cmp r1,#6
# qhasm: j = 10 if =
# asm 1: ldreq <j=int32#6,=10
# asm 2: ldreq <j=r5,=10
ldreq r5,=10
# qhasm: mulsource = playground1_ptr + 336 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#336
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#336
addeq r2,r3,#336
# qhasm: postcopy = playground1_ptr + 336 if =
# asm 1: addeq <postcopy=int32#5,<playground1_ptr=int32#4,#336
# asm 2: addeq <postcopy=r4,<playground1_ptr=r3,#336
addeq r4,r3,#336
# qhasm: =? i - 7
# asm 1: cmp <i=int32#2,#7
# asm 2: cmp <i=r1,#7
cmp r1,#7
# qhasm: j = 50 if =
# asm 1: ldreq <j=int32#6,=50
# asm 2: ldreq <j=r5,=50
ldreq r5,=50
# qhasm: =? i - 8
# asm 1: cmp <i=int32#2,#8
# asm 2: cmp <i=r1,#8
cmp r1,#8
# qhasm: j = 100 if =
# asm 1: ldreq <j=int32#6,=100
# asm 2: ldreq <j=r5,=100
ldreq r5,=100
# qhasm: =? i - 9
# asm 1: cmp <i=int32#2,#9
# asm 2: cmp <i=r1,#9
cmp r1,#9
# qhasm: j = 50 if =
# asm 1: ldreq <j=int32#6,=50
# asm 2: ldreq <j=r5,=50
ldreq r5,=50
# qhasm: mulsource = playground1_ptr + 336 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#336
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#336
addeq r2,r3,#336
# qhasm: =? i - 10
# asm 1: cmp <i=int32#2,#10
# asm 2: cmp <i=r1,#10
cmp r1,#10
# qhasm: j = 5 if =
# asm 1: ldreq <j=int32#6,=5
# asm 2: ldreq <j=r5,=5
ldreq r5,=5
# qhasm: mulsource = playground1_ptr + 48 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#48
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#48
addeq r2,r3,#48
# qhasm: =? i - 11
# asm 1: cmp <i=int32#2,#11
# asm 2: cmp <i=r1,#11
cmp r1,#11
# qhasm: j = 0 if =
# asm 1: ldreq <j=int32#6,=0
# asm 2: ldreq <j=r5,=0
ldreq r5,=0
# qhasm: mulsource = playground1_ptr + 96 if =
# asm 1: addeq <mulsource=int32#3,<playground1_ptr=int32#4,#96
# asm 2: addeq <mulsource=r2,<playground1_ptr=r3,#96
addeq r2,r3,#96
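# note: across i = 0..11 the squaring counts j selected above are
# 2,1,1,5,10,20,10,50,100,50,5,0 -- 254 squarings in total, matching
# the classical addition chain for the exponent 2^255 - 21.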
# qhasm: posy = playground1_ptr + 144
# asm 1: add >posy=int32#7,<playground1_ptr=int32#4,#144
# asm 2: add >posy=r6,<playground1_ptr=r3,#144
add r6,r3,#144
# qhasm: posx = playground1_ptr + 288
# asm 1: add >posx=int32#8,<playground1_ptr=int32#4,#288
# asm 2: add >posx=r7,<playground1_ptr=r3,#288
add r7,r3,#288
# qhasm: f0 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<posy=int32#7,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<posy=r6,: 128]!
vld1.8 {d0-d1},[r6,: 128]!
# qhasm: f4 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<posy=int32#7,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<posy=r6,: 128]!
vld1.8 {d2-d3},[r6,: 128]!
# qhasm: new f8
# qhasm: f8 aligned= mem64[posy] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<posy=int32#7,: 64]
# asm 2: vld1.8 {<f8=d4},[<posy=r6,: 64]
vld1.8 {d4},[r6,: 64]
# qhasm: mem128[posx] aligned= f0;posx += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<posx=int32#8,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<posx=r7,: 128]!
vst1.8 {d0-d1},[r7,: 128]!
# qhasm: mem128[posx] aligned= f4;posx += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<posx=int32#8,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<posx=r7,: 128]!
vst1.8 {d2-d3},[r7,: 128]!
# qhasm: mem64[posx] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<posx=int32#8,: 64]
# asm 2: vst1.8 <f8=d4,[<posx=r7,: 64]
vst1.8 d4,[r7,: 64]
# qhasm: =? j - 0
# asm 1: cmp <j=int32#6,#0
# asm 2: cmp <j=r5,#0
cmp r5,#0
# qhasm: goto skipsquaringloop if =
beq ._skipsquaringloop
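# note: squaringloop multiplies the value at playground1_ptr + 288 by
# itself (posf = posg = posh below), i.e. squares it in place, j times.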
# qhasm: squaringloop:
._squaringloop:
# qhasm: posf = playground1_ptr + 288
# asm 1: add >posf=int32#7,<playground1_ptr=int32#4,#288
# asm 2: add >posf=r6,<playground1_ptr=r3,#288
add r6,r3,#288
# qhasm: posg = playground1_ptr + 288
# asm 1: add >posg=int32#8,<playground1_ptr=int32#4,#288
# asm 2: add >posg=r7,<playground1_ptr=r3,#288
add r7,r3,#288
# qhasm: posh = playground1_ptr + 288
# asm 1: add >posh=int32#9,<playground1_ptr=int32#4,#288
# asm 2: add >posh=r8,<playground1_ptr=r3,#288
add r8,r3,#288
# qhasm: 4x _19_19_19_19 = 19
# asm 1: vmov.i32 >_19_19_19_19=reg128#1,#19
# asm 2: vmov.i32 >_19_19_19_19=q0,#19
vmov.i32 q0,#19
# qhasm: 4x _0_1_0_1 = 0
# asm 1: vmov.i32 >_0_1_0_1=reg128#2,#0
# asm 2: vmov.i32 >_0_1_0_1=q1,#0
vmov.i32 q1,#0
# qhasm: 4x _1_1_1_1 = 1
# asm 1: vmov.i32 >_1_1_1_1=reg128#3,#1
# asm 2: vmov.i32 >_1_1_1_1=q2,#1
vmov.i32 q2,#1
# qhasm: _0_1_0_1[0,1,2,3] _1_1_1_1[0,1,2,3] = _0_1_0_1[0]_1_1_1_1[0]_0_1_0_1[1]_1_1_1_1[1] _0_1_0_1[2]_1_1_1_1[2]_0_1_0_1[3]_1_1_1_1[3]
# asm 1: vzip.i32 <_0_1_0_1=reg128#2,<_1_1_1_1=reg128#3
# asm 2: vzip.i32 <_0_1_0_1=q1,<_1_1_1_1=q2
vzip.i32 q1,q2
# qhasm: g0_g1_g2_g3 aligned= mem128[posg];posg+=16
# asm 1: vld1.8 {>g0_g1_g2_g3=reg128#3%bot->g0_g1_g2_g3=reg128#3%top},[<posg=int32#8,: 128]!
# asm 2: vld1.8 {>g0_g1_g2_g3=d4->g0_g1_g2_g3=d5},[<posg=r7,: 128]!
vld1.8 {d4-d5},[r7,: 128]!
# qhasm: g4_g5_g6_g7 aligned= mem128[posg];posg+=16
# asm 1: vld1.8 {>g4_g5_g6_g7=reg128#4%bot->g4_g5_g6_g7=reg128#4%top},[<posg=int32#8,: 128]!
# asm 2: vld1.8 {>g4_g5_g6_g7=d6->g4_g5_g6_g7=d7},[<posg=r7,: 128]!
vld1.8 {d6-d7},[r7,: 128]!
# qhasm: new f8_f9_g8_g9
# qhasm: f8_f9_g8_g9 aligned= f8_f9_g8_g9[0]mem64[posg]
# asm 1: vld1.8 {<f8_f9_g8_g9=reg128#5%top},[<posg=int32#8,: 64]
# asm 2: vld1.8 {<f8_f9_g8_g9=d9},[<posg=r7,: 64]
vld1.8 {d9},[r7,: 64]
# qhasm: f0_f1_f2_f3 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>f0_f1_f2_f3=reg128#6%bot->f0_f1_f2_f3=reg128#6%top},[<posf=int32#7,: 128]!
# asm 2: vld1.8 {>f0_f1_f2_f3=d10->f0_f1_f2_f3=d11},[<posf=r6,: 128]!
vld1.8 {d10-d11},[r6,: 128]!
# qhasm: playp = &playground2
# asm 1: lea >playp=int32#8,<playground2=stack512#1
# asm 2: lea >playp=r7,<playground2=[sp,#416]
add r7,sp,#416
# qhasm: f4_f5_f6_f7 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>f4_f5_f6_f7=reg128#7%bot->f4_f5_f6_f7=reg128#7%top},[<posf=int32#7,: 128]!
# asm 2: vld1.8 {>f4_f5_f6_f7=d12->f4_f5_f6_f7=d13},[<posf=r6,: 128]!
vld1.8 {d12-d13},[r6,: 128]!
# qhasm: 4x 19g0_19g1_19g2_19g3 = g0_g1_g2_g3 * _19_19_19_19
# asm 1: vmul.i32 >19g0_19g1_19g2_19g3=reg128#8,<g0_g1_g2_g3=reg128#3,<_19_19_19_19=reg128#1
# asm 2: vmul.i32 >19g0_19g1_19g2_19g3=q7,<g0_g1_g2_g3=q2,<_19_19_19_19=q0
vmul.i32 q7,q2,q0
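# note: multiplying the g limbs by 19 precomputes the wrap-around of
# products whose limb indices sum past 9, since 2^255 = 19 mod
# 2^255-19; the (0,1,0,1) shift vector built above likewise doubles
# the odd-index f limbs, the extra factor of 2 that arises when two
# odd (25-bit) limbs are multiplied in this representation.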
  6716. # qhasm: f8_f9_g8_g9 aligned= mem64[posf]f8_f9_g8_g9[1]
  6717. # asm 1: vld1.8 {<f8_f9_g8_g9=reg128#5%bot},[<posf=int32#7,: 64]
  6718. # asm 2: vld1.8 {<f8_f9_g8_g9=d8},[<posf=r6,: 64]
  6719. vld1.8 {d8},[r6,: 64]
  6720. # qhasm: new f1_f8_f3_f0
  6721. # qhasm: f1_f8_f3_f0 = f1_f8_f3_f0[0,1]f0_f1_f2_f3[3]f0_f1_f2_f3[0]
  6722. # asm 1: vext.32 <f1_f8_f3_f0=reg128#9%top,<f0_f1_f2_f3=reg128#6%top,<f0_f1_f2_f3=reg128#6%bot,#1
  6723. # asm 2: vext.32 <f1_f8_f3_f0=d17,<f0_f1_f2_f3=d11,<f0_f1_f2_f3=d10,#1
  6724. vext.32 d17,d11,d10,#1
  6725. # qhasm: 4x 19g4_19g5_19g6_19g7 = g4_g5_g6_g7 * _19_19_19_19
  6726. # asm 1: vmul.i32 >19g4_19g5_19g6_19g7=reg128#10,<g4_g5_g6_g7=reg128#4,<_19_19_19_19=reg128#1
  6727. # asm 2: vmul.i32 >19g4_19g5_19g6_19g7=q9,<g4_g5_g6_g7=q3,<_19_19_19_19=q0
  6728. vmul.i32 q9,q3,q0
  6729. # qhasm: f1_f8_f3_f0 = f0_f1_f2_f3[1]f8_f9_g8_g9[0]f1_f8_f3_f0[2,3]
  6730. # asm 1: vext.32 <f1_f8_f3_f0=reg128#9%bot,<f0_f1_f2_f3=reg128#6%bot,<f8_f9_g8_g9=reg128#5%bot,#1
  6731. # asm 2: vext.32 <f1_f8_f3_f0=d16,<f0_f1_f2_f3=d10,<f8_f9_g8_g9=d8,#1
  6732. vext.32 d16,d10,d8,#1
  6733. # qhasm: 4x f0_2f1_f2_2f3 = f0_f1_f2_f3 << _0_1_0_1
  6734. # asm 1: vshl.u32 >f0_2f1_f2_2f3=reg128#11,<f0_f1_f2_f3=reg128#6,<_0_1_0_1=reg128#2
  6735. # asm 2: vshl.u32 >f0_2f1_f2_2f3=q10,<f0_f1_f2_f3=q5,<_0_1_0_1=q1
  6736. vshl.u32 q10,q5,q1
  6737. # qhasm: new g0_19g1_g2_19g3
  6738. # qhasm: g0_19g1_g2_19g3 = 19g0_19g1_19g2_19g3[1]g0_g1_g2_g3[0]g0_19g1_g2_19g3[2,3]
  6739. # asm 1: vext.32 <g0_19g1_g2_19g3=reg128#12%bot,<19g0_19g1_19g2_19g3=reg128#8%bot,<g0_g1_g2_g3=reg128#3%bot,#1
  6740. # asm 2: vext.32 <g0_19g1_g2_19g3=d22,<19g0_19g1_19g2_19g3=d14,<g0_g1_g2_g3=d4,#1
  6741. vext.32 d22,d14,d4,#1
  6742. # qhasm: new g4_19g5_g6_19g7
  6743. # qhasm: g4_19g5_g6_19g7 = 19g4_19g5_19g6_19g7[1]g4_g5_g6_g7[0]g4_19g5_g6_19g7[2,3]
  6744. # asm 1: vext.32 <g4_19g5_g6_19g7=reg128#13%bot,<19g4_19g5_19g6_19g7=reg128#10%bot,<g4_g5_g6_g7=reg128#4%bot,#1
  6745. # asm 2: vext.32 <g4_19g5_g6_19g7=d24,<19g4_19g5_19g6_19g7=d18,<g4_g5_g6_g7=d6,#1
  6746. vext.32 d24,d18,d6,#1
  6747. # qhasm: 4x f4_2f5_f6_2f7 = f4_f5_f6_f7 << _0_1_0_1
  6748. # asm 1: vshl.u32 >f4_2f5_f6_2f7=reg128#14,<f4_f5_f6_f7=reg128#7,<_0_1_0_1=reg128#2
  6749. # asm 2: vshl.u32 >f4_2f5_f6_2f7=q13,<f4_f5_f6_f7=q6,<_0_1_0_1=q1
  6750. vshl.u32 q13,q6,q1
  6751. # qhasm: new f8_2f9_f9_f6
  6752. # qhasm: f8_2f9_f9_f6 = f8_f9_g8_g9[0] << _0_1_0_1[0],f8_f9_g8_g9[1] << _0_1_0_1[1],f8_2f9_f9_f6[2,3]
  6753. # asm 1: vshl.u32 <f8_2f9_f9_f6=reg128#15%bot,<f8_f9_g8_g9=reg128#5%bot,<_0_1_0_1=reg128#2%bot
  6754. # asm 2: vshl.u32 <f8_2f9_f9_f6=d28,<f8_f9_g8_g9=d8,<_0_1_0_1=d2
  6755. vshl.u32 d28,d8,d2
  6756. # qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[1]g0_19g1_g2_19g3[0]g0_19g1_g2_19g3[2,3]
  6757. # asm 1: vrev64.i32 <g0_19g1_g2_19g3=reg128#12%bot,<g0_19g1_g2_19g3=reg128#12%bot
  6758. # asm 2: vrev64.i32 <g0_19g1_g2_19g3=d22,<g0_19g1_g2_19g3=d22
  6759. vrev64.i32 d22,d22
  6760. # qhasm: g8_19g9_19g8_19g9[0,1] = g8_19g9_19g8_19g9[0,1];g8_19g9_19g8_19g9[2] = f8_f9_g8_g9[2] * _19_19_19_19[2];g8_19g9_19g8_19g9[3] = f8_f9_g8_g9[3] * _19_19_19_19[3]
  6761. # asm 1: vmul.i32 >g8_19g9_19g8_19g9=reg128#1%top,<f8_f9_g8_g9=reg128#5%top,<_19_19_19_19=reg128#1%top
  6762. # asm 2: vmul.i32 >g8_19g9_19g8_19g9=d1,<f8_f9_g8_g9=d9,<_19_19_19_19=d1
  6763. vmul.i32 d1,d9,d1
  6764. # qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[1]g4_19g5_g6_19g7[0]g4_19g5_g6_19g7[2,3]
  6765. # asm 1: vrev64.i32 <g4_19g5_g6_19g7=reg128#13%bot,<g4_19g5_g6_19g7=reg128#13%bot
  6766. # asm 2: vrev64.i32 <g4_19g5_g6_19g7=d24,<g4_19g5_g6_19g7=d24
  6767. vrev64.i32 d24,d24
  6768. # qhasm: f8_2f9_f9_f6 = f8_2f9_f9_f6[0,1]f8_f9_g8_g9[1]f4_f5_f6_f7[2]
  6769. # asm 1: vext.32 <f8_2f9_f9_f6=reg128#15%top,<f8_f9_g8_g9=reg128#5%bot,<f4_f5_f6_f7=reg128#7%top,#1
  6770. # asm 2: vext.32 <f8_2f9_f9_f6=d29,<f8_f9_g8_g9=d8,<f4_f5_f6_f7=d13,#1
  6771. vext.32 d29,d8,d13,#1
  6772. # qhasm: g8_19g9_19g8_19g9 = g8_19g9_19g8_19g9[3]f8_f9_g8_g9[2]g8_19g9_19g8_19g9[2,3]
  6773. # asm 1: vext.32 <g8_19g9_19g8_19g9=reg128#1%bot,<g8_19g9_19g8_19g9=reg128#1%top,<f8_f9_g8_g9=reg128#5%top,#1
  6774. # asm 2: vext.32 <g8_19g9_19g8_19g9=d0,<g8_19g9_19g8_19g9=d1,<f8_f9_g8_g9=d9,#1
  6775. vext.32 d0,d1,d9,#1
  6776. # qhasm: g8_19g9_19g8_19g9 = g8_19g9_19g8_19g9[1]g8_19g9_19g8_19g9[0]g8_19g9_19g8_19g9[2,3]
  6777. # asm 1: vrev64.i32 <g8_19g9_19g8_19g9=reg128#1%bot,<g8_19g9_19g8_19g9=reg128#1%bot
  6778. # asm 2: vrev64.i32 <g8_19g9_19g8_19g9=d0,<g8_19g9_19g8_19g9=d0
  6779. vrev64.i32 d0,d0
  6780. # qhasm: new 19g8_g9_19g2_g3
  6781. # qhasm: 19g8_g9_19g2_g3 = f8_f9_g8_g9[3]g8_19g9_19g8_19g9[2]19g8_g9_19g2_g3[2,3]
  6782. # asm 1: vext.32 <19g8_g9_19g2_g3=reg128#2%bot,<f8_f9_g8_g9=reg128#5%top,<g8_19g9_19g8_19g9=reg128#1%top,#1
  6783. # asm 2: vext.32 <19g8_g9_19g2_g3=d2,<f8_f9_g8_g9=d9,<g8_19g9_19g8_19g9=d1,#1
  6784. vext.32 d2,d9,d1,#1
  6785. # qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[0,1]19g0_19g1_19g2_19g3[3]g0_g1_g2_g3[2]
  6786. # asm 1: vext.32 <g0_19g1_g2_19g3=reg128#12%top,<19g0_19g1_19g2_19g3=reg128#8%top,<g0_g1_g2_g3=reg128#3%top,#1
  6787. # asm 2: vext.32 <g0_19g1_g2_19g3=d23,<19g0_19g1_19g2_19g3=d15,<g0_g1_g2_g3=d5,#1
  6788. vext.32 d23,d15,d5,#1
  6789. # qhasm: h02[0,1] = f0_2f1_f2_2f3[0] signed* g0_g1_g2_g3[0];h02[2,3] = f0_2f1_f2_2f3[1] signed* g0_g1_g2_g3[1]
  6790. # asm 1: vmull.s32 >h02=reg128#5,<f0_2f1_f2_2f3=reg128#11%bot,<g0_g1_g2_g3=reg128#3%bot
  6791. # asm 2: vmull.s32 >h02=q4,<f0_2f1_f2_2f3=d20,<g0_g1_g2_g3=d4
  6792. vmull.s32 q4,d20,d4
  6793. # qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[0,1]g0_19g1_g2_19g3[3]g0_19g1_g2_19g3[2]
  6794. # asm 1: vrev64.i32 <g0_19g1_g2_19g3=reg128#12%top,<g0_19g1_g2_19g3=reg128#12%top
  6795. # asm 2: vrev64.i32 <g0_19g1_g2_19g3=d23,<g0_19g1_g2_19g3=d23
  6796. vrev64.i32 d23,d23
  6797. # qhasm: h02[0,1] += f0_2f1_f2_2f3[2] signed* g8_19g9_19g8_19g9[2];h02[2,3] += f0_2f1_f2_2f3[3] signed* g8_19g9_19g8_19g9[3]
  6798. # asm 1: vmlal.s32 <h02=reg128#5,<f0_2f1_f2_2f3=reg128#11%top,<g8_19g9_19g8_19g9=reg128#1%top
  6799. # asm 2: vmlal.s32 <h02=q4,<f0_2f1_f2_2f3=d21,<g8_19g9_19g8_19g9=d1
  6800. vmlal.s32 q4,d21,d1
  6801. # qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[1]19g8_g9_19g2_g3[0]19g8_g9_19g2_g3[2,3]
  6802. # asm 1: vrev64.i32 <19g8_g9_19g2_g3=reg128#2%bot,<19g8_g9_19g2_g3=reg128#2%bot
  6803. # asm 2: vrev64.i32 <19g8_g9_19g2_g3=d2,<19g8_g9_19g2_g3=d2
  6804. vrev64.i32 d2,d2
  6805. # qhasm: h02[0,1] += f4_2f5_f6_2f7[0] signed* 19g4_19g5_19g6_19g7[2];h02[2,3] += f4_2f5_f6_2f7[1] signed* 19g4_19g5_19g6_19g7[3]
  6806. # asm 1: vmlal.s32 <h02=reg128#5,<f4_2f5_f6_2f7=reg128#14%bot,<19g4_19g5_19g6_19g7=reg128#10%top
  6807. # asm 2: vmlal.s32 <h02=q4,<f4_2f5_f6_2f7=d26,<19g4_19g5_19g6_19g7=d19
  6808. vmlal.s32 q4,d26,d19
  6809. # qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[0,1]g0_g1_g2_g3[3]19g0_19g1_19g2_19g3[2]
  6810. # asm 1: vext.32 <19g8_g9_19g2_g3=reg128#2%top,<g0_g1_g2_g3=reg128#3%top,<19g0_19g1_19g2_19g3=reg128#8%top,#1
  6811. # asm 2: vext.32 <19g8_g9_19g2_g3=d3,<g0_g1_g2_g3=d5,<19g0_19g1_19g2_19g3=d15,#1
  6812. vext.32 d3,d5,d15,#1
  6813. # qhasm: h02[0,1] += f4_2f5_f6_2f7[2] signed* 19g4_19g5_19g6_19g7[0];h02[2,3] += f4_2f5_f6_2f7[3] signed* 19g4_19g5_19g6_19g7[1]
  6814. # asm 1: vmlal.s32 <h02=reg128#5,<f4_2f5_f6_2f7=reg128#14%top,<19g4_19g5_19g6_19g7=reg128#10%bot
  6815. # asm 2: vmlal.s32 <h02=q4,<f4_2f5_f6_2f7=d27,<19g4_19g5_19g6_19g7=d18
  6816. vmlal.s32 q4,d27,d18
  6817. # qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[0,1]19g8_g9_19g2_g3[3]19g8_g9_19g2_g3[2]
  6818. # asm 1: vrev64.i32 <19g8_g9_19g2_g3=reg128#2%top,<19g8_g9_19g2_g3=reg128#2%top
  6819. # asm 2: vrev64.i32 <19g8_g9_19g2_g3=d3,<19g8_g9_19g2_g3=d3
  6820. vrev64.i32 d3,d3
  6821. # qhasm: h02[0,1] += f8_2f9_f9_f6[0] signed* 19g0_19g1_19g2_19g3[2];h02[2,3] += f8_2f9_f9_f6[1] signed* 19g0_19g1_19g2_19g3[3]
  6822. # asm 1: vmlal.s32 <h02=reg128#5,<f8_2f9_f9_f6=reg128#15%bot,<19g0_19g1_19g2_19g3=reg128#8%top
  6823. # asm 2: vmlal.s32 <h02=q4,<f8_2f9_f9_f6=d28,<19g0_19g1_19g2_19g3=d15
  6824. vmlal.s32 q4,d28,d15
  6825. # qhasm: new f5_f2_f7_f4
  6826. # qhasm: f5_f2_f7_f4 = f4_f5_f6_f7[1]f0_f1_f2_f3[2]f5_f2_f7_f4[2,3]
  6827. # asm 1: vext.32 <f5_f2_f7_f4=reg128#8%bot,<f4_f5_f6_f7=reg128#7%bot,<f0_f1_f2_f3=reg128#6%top,#1
  6828. # asm 2: vext.32 <f5_f2_f7_f4=d14,<f4_f5_f6_f7=d12,<f0_f1_f2_f3=d11,#1
  6829. vext.32 d14,d12,d11,#1
  6830. # qhasm: h31[0,1] = f1_f8_f3_f0[0] signed* g0_19g1_g2_19g3[2];h31[2,3] = f1_f8_f3_f0[1] signed* g0_19g1_g2_19g3[3]
  6831. # asm 1: vmull.s32 >h31=reg128#6,<f1_f8_f3_f0=reg128#9%bot,<g0_19g1_g2_19g3=reg128#12%top
  6832. # asm 2: vmull.s32 >h31=q5,<f1_f8_f3_f0=d16,<g0_19g1_g2_19g3=d23
  6833. vmull.s32 q5,d16,d23
  6834. # qhasm: f5_f2_f7_f4 = f5_f2_f7_f4[0,1]f4_f5_f6_f7[3]f4_f5_f6_f7[0]
  6835. # asm 1: vext.32 <f5_f2_f7_f4=reg128#8%top,<f4_f5_f6_f7=reg128#7%top,<f4_f5_f6_f7=reg128#7%bot,#1
  6836. # asm 2: vext.32 <f5_f2_f7_f4=d15,<f4_f5_f6_f7=d13,<f4_f5_f6_f7=d12,#1
  6837. vext.32 d15,d13,d12,#1
  6838. # qhasm: h31[0,1] += f1_f8_f3_f0[2] signed* g0_g1_g2_g3[0];h31[2,3] += f1_f8_f3_f0[3] signed* g0_g1_g2_g3[1]
  6839. # asm 1: vmlal.s32 <h31=reg128#6,<f1_f8_f3_f0=reg128#9%top,<g0_g1_g2_g3=reg128#3%bot
  6840. # asm 2: vmlal.s32 <h31=q5,<f1_f8_f3_f0=d17,<g0_g1_g2_g3=d4
  6841. vmlal.s32 q5,d17,d4
  6842. # qhasm: mem64[playp] aligned= h02[0];playp+=8
  6843. # asm 1: vst1.8 <h02=reg128#5%bot,[<playp=int32#8,: 64]!
  6844. # asm 2: vst1.8 <h02=d8,[<playp=r7,: 64]!
  6845. vst1.8 d8,[r7,: 64]!
  6846. # qhasm: h31[0,1] += f5_f2_f7_f4[0] signed* g8_19g9_19g8_19g9[2];h31[2,3] += f5_f2_f7_f4[1] signed* g8_19g9_19g8_19g9[3]
  6847. # asm 1: vmlal.s32 <h31=reg128#6,<f5_f2_f7_f4=reg128#8%bot,<g8_19g9_19g8_19g9=reg128#1%top
  6848. # asm 2: vmlal.s32 <h31=q5,<f5_f2_f7_f4=d14,<g8_19g9_19g8_19g9=d1
  6849. vmlal.s32 q5,d14,d1
  6850. # qhasm: new h24
  6851. # qhasm: h24 = h02[2,3]h24[2,3]
  6852. # asm 1: vext.32 <h24=reg128#7%bot,<h02=reg128#5%top,<h02=reg128#5%bot,#0
  6853. # asm 2: vext.32 <h24=d12,<h02=d9,<h02=d8,#0
  6854. vext.32 d12,d9,d8,#0
  6855. # qhasm: h31[0,1] += f5_f2_f7_f4[2] signed* 19g4_19g5_19g6_19g7[2];h31[2,3] += f5_f2_f7_f4[3] signed* 19g4_19g5_19g6_19g7[3]
  6856. # asm 1: vmlal.s32 <h31=reg128#6,<f5_f2_f7_f4=reg128#8%top,<19g4_19g5_19g6_19g7=reg128#10%top
  6857. # asm 2: vmlal.s32 <h31=q5,<f5_f2_f7_f4=d15,<19g4_19g5_19g6_19g7=d19
  6858. vmlal.s32 q5,d15,d19
  6859. # qhasm: h24 = h24[0],0
  6860. # asm 1: vmov.i64 <h24=reg128#7%top,#0
  6861. # asm 2: vmov.i64 <h24=d13,#0
  6862. vmov.i64 d13,#0
  6863. # qhasm: h31[0,1] += f8_2f9_f9_f6[2] signed* 19g4_19g5_19g6_19g7[0];h31[2,3] += f8_2f9_f9_f6[3] signed* 19g4_19g5_19g6_19g7[1]
  6864. # asm 1: vmlal.s32 <h31=reg128#6,<f8_2f9_f9_f6=reg128#15%top,<19g4_19g5_19g6_19g7=reg128#10%bot
  6865. # asm 2: vmlal.s32 <h31=q5,<f8_2f9_f9_f6=d29,<19g4_19g5_19g6_19g7=d18
  6866. vmlal.s32 q5,d29,d18
  6867. # qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[0,1]19g4_19g5_19g6_19g7[3]g4_g5_g6_g7[2]
  6868. # asm 1: vext.32 <g4_19g5_g6_19g7=reg128#13%top,<19g4_19g5_19g6_19g7=reg128#10%top,<g4_g5_g6_g7=reg128#4%top,#1
  6869. # asm 2: vext.32 <g4_19g5_g6_19g7=d25,<19g4_19g5_19g6_19g7=d19,<g4_g5_g6_g7=d7,#1
  6870. vext.32 d25,d19,d7,#1
  6871. # qhasm: h24[0,1] += f0_2f1_f2_2f3[0] signed* g0_g1_g2_g3[2];h24[2,3] += f0_2f1_f2_2f3[1] signed* g0_g1_g2_g3[3]
  6872. # asm 1: vmlal.s32 <h24=reg128#7,<f0_2f1_f2_2f3=reg128#11%bot,<g0_g1_g2_g3=reg128#3%top
  6873. # asm 2: vmlal.s32 <h24=q6,<f0_2f1_f2_2f3=d20,<g0_g1_g2_g3=d5
  6874. vmlal.s32 q6,d20,d5
  6875. # qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[0,1]g4_19g5_g6_19g7[3]g4_19g5_g6_19g7[2]
  6876. # asm 1: vrev64.i32 <g4_19g5_g6_19g7=reg128#13%top,<g4_19g5_g6_19g7=reg128#13%top
  6877. # asm 2: vrev64.i32 <g4_19g5_g6_19g7=d25,<g4_19g5_g6_19g7=d25
  6878. vrev64.i32 d25,d25
  6879. # qhasm: h24[0,1] += f0_2f1_f2_2f3[2] signed* g0_g1_g2_g3[0];h24[2,3] += f0_2f1_f2_2f3[3] signed* g0_g1_g2_g3[1]
  6880. # asm 1: vmlal.s32 <h24=reg128#7,<f0_2f1_f2_2f3=reg128#11%top,<g0_g1_g2_g3=reg128#3%bot
  6881. # asm 2: vmlal.s32 <h24=q6,<f0_2f1_f2_2f3=d21,<g0_g1_g2_g3=d4
  6882. vmlal.s32 q6,d21,d4
  6883. # qhasm: mem64[playp] aligned= h31[1];playp+=8
  6884. # asm 1: vst1.8 <h31=reg128#6%top,[<playp=int32#8,: 64]!
  6885. # asm 2: vst1.8 <h31=d11,[<playp=r7,: 64]!
  6886. vst1.8 d11,[r7,: 64]!
  6887. # qhasm: h24[0,1] += f4_2f5_f6_2f7[0] signed* g8_19g9_19g8_19g9[2];h24[2,3] += f4_2f5_f6_2f7[1] signed* g8_19g9_19g8_19g9[3]
  6888. # asm 1: vmlal.s32 <h24=reg128#7,<f4_2f5_f6_2f7=reg128#14%bot,<g8_19g9_19g8_19g9=reg128#1%top
  6889. # asm 2: vmlal.s32 <h24=q6,<f4_2f5_f6_2f7=d26,<g8_19g9_19g8_19g9=d1
  6890. vmlal.s32 q6,d26,d1
  6891. # qhasm: new h53
  6892. # qhasm: h53 = h53[0,1]h31[0,1]
  6893. # asm 1: vext.32 <h53=reg128#5%top,<h31=reg128#6%bot,<h31=reg128#6%bot,#0
  6894. # asm 2: vext.32 <h53=d9,<h31=d10,<h31=d10,#0
  6895. vext.32 d9,d10,d10,#0
  6896. # qhasm: h24[0,1] += f4_2f5_f6_2f7[2] signed* 19g4_19g5_19g6_19g7[2];h24[2,3] += f4_2f5_f6_2f7[3] signed* 19g4_19g5_19g6_19g7[3]
  6897. # asm 1: vmlal.s32 <h24=reg128#7,<f4_2f5_f6_2f7=reg128#14%top,<19g4_19g5_19g6_19g7=reg128#10%top
  6898. # asm 2: vmlal.s32 <h24=q6,<f4_2f5_f6_2f7=d27,<19g4_19g5_19g6_19g7=d19
  6899. vmlal.s32 q6,d27,d19
  6900. # qhasm: h53 = 0,h53[1]
  6901. # asm 1: vmov.i64 <h53=reg128#5%bot,#0
  6902. # asm 2: vmov.i64 <h53=d8,#0
  6903. vmov.i64 d8,#0
  6904. # qhasm: h24[0,1] += f8_2f9_f9_f6[0] signed* 19g4_19g5_19g6_19g7[0];h24[2,3] += f8_2f9_f9_f6[1] signed* 19g4_19g5_19g6_19g7[1]
  6905. # asm 1: vmlal.s32 <h24=reg128#7,<f8_2f9_f9_f6=reg128#15%bot,<19g4_19g5_19g6_19g7=reg128#10%bot
  6906. # asm 2: vmlal.s32 <h24=q6,<f8_2f9_f9_f6=d28,<19g4_19g5_19g6_19g7=d18
  6907. vmlal.s32 q6,d28,d18
  6908. # qhasm: h53[0,1] += f1_f8_f3_f0[0] signed* g4_19g5_g6_19g7[0];h53[2,3] += f1_f8_f3_f0[1] signed* g4_19g5_g6_19g7[1]
  6909. # asm 1: vmlal.s32 <h53=reg128#5,<f1_f8_f3_f0=reg128#9%bot,<g4_19g5_g6_19g7=reg128#13%bot
  6910. # asm 2: vmlal.s32 <h53=q4,<f1_f8_f3_f0=d16,<g4_19g5_g6_19g7=d24
  6911. vmlal.s32 q4,d16,d24
  6912. # qhasm: h53[0,1] += f1_f8_f3_f0[2] signed* g0_g1_g2_g3[2];h53[2,3] += f1_f8_f3_f0[3] signed* g0_g1_g2_g3[3]
  6913. # asm 1: vmlal.s32 <h53=reg128#5,<f1_f8_f3_f0=reg128#9%top,<g0_g1_g2_g3=reg128#3%top
  6914. # asm 2: vmlal.s32 <h53=q4,<f1_f8_f3_f0=d17,<g0_g1_g2_g3=d5
  6915. vmlal.s32 q4,d17,d5
  6916. # qhasm: h53[0,1] += f5_f2_f7_f4[0] signed* g0_g1_g2_g3[0];h53[2,3] += f5_f2_f7_f4[1] signed* g0_g1_g2_g3[1]
  6917. # asm 1: vmlal.s32 <h53=reg128#5,<f5_f2_f7_f4=reg128#8%bot,<g0_g1_g2_g3=reg128#3%bot
  6918. # asm 2: vmlal.s32 <h53=q4,<f5_f2_f7_f4=d14,<g0_g1_g2_g3=d4
  6919. vmlal.s32 q4,d14,d4
  6920. # qhasm: mem64[playp] aligned= h24[0];playp+=8
  6921. # asm 1: vst1.8 <h24=reg128#7%bot,[<playp=int32#8,: 64]!
  6922. # asm 2: vst1.8 <h24=d12,[<playp=r7,: 64]!
  6923. vst1.8 d12,[r7,: 64]!
  6924. # qhasm: h53[0,1] += f5_f2_f7_f4[2] signed* g8_19g9_19g8_19g9[2];h53[2,3] += f5_f2_f7_f4[3] signed* g8_19g9_19g8_19g9[3]
  6925. # asm 1: vmlal.s32 <h53=reg128#5,<f5_f2_f7_f4=reg128#8%top,<g8_19g9_19g8_19g9=reg128#1%top
  6926. # asm 2: vmlal.s32 <h53=q4,<f5_f2_f7_f4=d15,<g8_19g9_19g8_19g9=d1
  6927. vmlal.s32 q4,d15,d1
  6928. # qhasm: new h46
  6929. # qhasm: h46 = h24[2,3]h46[2,3]
  6930. # asm 1: vext.32 <h46=reg128#6%bot,<h24=reg128#7%top,<h24=reg128#7%bot,#0
  6931. # asm 2: vext.32 <h46=d10,<h24=d13,<h24=d12,#0
  6932. vext.32 d10,d13,d12,#0
  6933. # qhasm: h53[0,1] += f8_2f9_f9_f6[2] signed* 19g4_19g5_19g6_19g7[2];h53[2,3] += f8_2f9_f9_f6[3] signed* 19g4_19g5_19g6_19g7[3]
  6934. # asm 1: vmlal.s32 <h53=reg128#5,<f8_2f9_f9_f6=reg128#15%top,<19g4_19g5_19g6_19g7=reg128#10%top
  6935. # asm 2: vmlal.s32 <h53=q4,<f8_2f9_f9_f6=d29,<19g4_19g5_19g6_19g7=d19
  6936. vmlal.s32 q4,d29,d19
  6937. # qhasm: h46 = h46[0],0
  6938. # asm 1: vmov.i64 <h46=reg128#6%top,#0
  6939. # asm 2: vmov.i64 <h46=d11,#0
  6940. vmov.i64 d11,#0
  6941. # qhasm: h46[0,1] += f0_2f1_f2_2f3[0] signed* g4_g5_g6_g7[0];h46[2,3] += f0_2f1_f2_2f3[1] signed* g4_g5_g6_g7[1]
  6942. # asm 1: vmlal.s32 <h46=reg128#6,<f0_2f1_f2_2f3=reg128#11%bot,<g4_g5_g6_g7=reg128#4%bot
  6943. # asm 2: vmlal.s32 <h46=q5,<f0_2f1_f2_2f3=d20,<g4_g5_g6_g7=d6
  6944. vmlal.s32 q5,d20,d6
  6945. # qhasm: h46[0,1] += f0_2f1_f2_2f3[2] signed* g0_g1_g2_g3[2];h46[2,3] += f0_2f1_f2_2f3[3] signed* g0_g1_g2_g3[3]
  6946. # asm 1: vmlal.s32 <h46=reg128#6,<f0_2f1_f2_2f3=reg128#11%top,<g0_g1_g2_g3=reg128#3%top
  6947. # asm 2: vmlal.s32 <h46=q5,<f0_2f1_f2_2f3=d21,<g0_g1_g2_g3=d5
  6948. vmlal.s32 q5,d21,d5
  6949. # qhasm: h46[0,1] += f4_2f5_f6_2f7[0] signed* g0_g1_g2_g3[0];h46[2,3] += f4_2f5_f6_2f7[1] signed* g0_g1_g2_g3[1]
  6950. # asm 1: vmlal.s32 <h46=reg128#6,<f4_2f5_f6_2f7=reg128#14%bot,<g0_g1_g2_g3=reg128#3%bot
  6951. # asm 2: vmlal.s32 <h46=q5,<f4_2f5_f6_2f7=d26,<g0_g1_g2_g3=d4
  6952. vmlal.s32 q5,d26,d4
  6953. # qhasm: new h75
  6954. # qhasm: h75 = h75[0,1]h53[0,1]
  6955. # asm 1: vext.32 <h75=reg128#7%top,<h53=reg128#5%bot,<h53=reg128#5%bot,#0
  6956. # asm 2: vext.32 <h75=d13,<h53=d8,<h53=d8,#0
  6957. vext.32 d13,d8,d8,#0
  6958. # qhasm: h46[0,1] += f4_2f5_f6_2f7[2] signed* g8_19g9_19g8_19g9[2];h46[2,3] += f4_2f5_f6_2f7[3] signed* g8_19g9_19g8_19g9[3]
  6959. # asm 1: vmlal.s32 <h46=reg128#6,<f4_2f5_f6_2f7=reg128#14%top,<g8_19g9_19g8_19g9=reg128#1%top
  6960. # asm 2: vmlal.s32 <h46=q5,<f4_2f5_f6_2f7=d27,<g8_19g9_19g8_19g9=d1
  6961. vmlal.s32 q5,d27,d1
  6962. # qhasm: h75 = 0,h75[1]
  6963. # asm 1: vmov.i64 <h75=reg128#7%bot,#0
  6964. # asm 2: vmov.i64 <h75=d12,#0
  6965. vmov.i64 d12,#0
  6966. # qhasm: h46[0,1] += f8_2f9_f9_f6[0] signed* 19g4_19g5_19g6_19g7[2];h46[2,3] += f8_2f9_f9_f6[1] signed* 19g4_19g5_19g6_19g7[3]
  6967. # asm 1: vmlal.s32 <h46=reg128#6,<f8_2f9_f9_f6=reg128#15%bot,<19g4_19g5_19g6_19g7=reg128#10%top
  6968. # asm 2: vmlal.s32 <h46=q5,<f8_2f9_f9_f6=d28,<19g4_19g5_19g6_19g7=d19
  6969. vmlal.s32 q5,d28,d19
  6970. # qhasm: mem64[playp] aligned= h53[1];playp+=8
  6971. # asm 1: vst1.8 <h53=reg128#5%top,[<playp=int32#8,: 64]!
  6972. # asm 2: vst1.8 <h53=d9,[<playp=r7,: 64]!
  6973. vst1.8 d9,[r7,: 64]!
  6974. # qhasm: h75[0,1] += f1_f8_f3_f0[0] signed* g4_19g5_g6_19g7[2];h75[2,3] += f1_f8_f3_f0[1] signed* g4_19g5_g6_19g7[3]
  6975. # asm 1: vmlal.s32 <h75=reg128#7,<f1_f8_f3_f0=reg128#9%bot,<g4_19g5_g6_19g7=reg128#13%top
  6976. # asm 2: vmlal.s32 <h75=q6,<f1_f8_f3_f0=d16,<g4_19g5_g6_19g7=d25
  6977. vmlal.s32 q6,d16,d25
  6978. # qhasm: h75[0,1] += f1_f8_f3_f0[2] signed* g4_g5_g6_g7[0];h75[2,3] += f1_f8_f3_f0[3] signed* g4_g5_g6_g7[1]
  6979. # asm 1: vmlal.s32 <h75=reg128#7,<f1_f8_f3_f0=reg128#9%top,<g4_g5_g6_g7=reg128#4%bot
  6980. # asm 2: vmlal.s32 <h75=q6,<f1_f8_f3_f0=d17,<g4_g5_g6_g7=d6
  6981. vmlal.s32 q6,d17,d6
  6982. # qhasm: mem64[playp] aligned= h46[0]
  6983. # asm 1: vst1.8 <h46=reg128#6%bot,[<playp=int32#8,: 64]
  6984. # asm 2: vst1.8 <h46=d10,[<playp=r7,: 64]
  6985. vst1.8 d10,[r7,: 64]
  6986. # qhasm: h75[0,1] += f5_f2_f7_f4[0] signed* g0_g1_g2_g3[2];h75[2,3] += f5_f2_f7_f4[1] signed* g0_g1_g2_g3[3]
  6987. # asm 1: vmlal.s32 <h75=reg128#7,<f5_f2_f7_f4=reg128#8%bot,<g0_g1_g2_g3=reg128#3%top
  6988. # asm 2: vmlal.s32 <h75=q6,<f5_f2_f7_f4=d14,<g0_g1_g2_g3=d5
  6989. vmlal.s32 q6,d14,d5
  6990. # qhasm: new h68
  6991. # qhasm: h68 = h46[2,3]h68[2,3]
  6992. # asm 1: vext.32 <h68=reg128#5%bot,<h46=reg128#6%top,<h46=reg128#6%bot,#0
  6993. # asm 2: vext.32 <h68=d8,<h46=d11,<h46=d10,#0
  6994. vext.32 d8,d11,d10,#0
  6995. # qhasm: h75[0,1] += f5_f2_f7_f4[2] signed* g0_g1_g2_g3[0];h75[2,3] += f5_f2_f7_f4[3] signed* g0_g1_g2_g3[1]
  6996. # asm 1: vmlal.s32 <h75=reg128#7,<f5_f2_f7_f4=reg128#8%top,<g0_g1_g2_g3=reg128#3%bot
  6997. # asm 2: vmlal.s32 <h75=q6,<f5_f2_f7_f4=d15,<g0_g1_g2_g3=d4
  6998. vmlal.s32 q6,d15,d4
  6999. # qhasm: h68 = h68[0],0
  7000. # asm 1: vmov.i64 <h68=reg128#5%top,#0
  7001. # asm 2: vmov.i64 <h68=d9,#0
  7002. vmov.i64 d9,#0
  7003. # qhasm: h75[0,1] += f8_2f9_f9_f6[2] signed* g8_19g9_19g8_19g9[2];h75[2,3] += f8_2f9_f9_f6[3] signed* g8_19g9_19g8_19g9[3]
  7004. # asm 1: vmlal.s32 <h75=reg128#7,<f8_2f9_f9_f6=reg128#15%top,<g8_19g9_19g8_19g9=reg128#1%top
  7005. # asm 2: vmlal.s32 <h75=q6,<f8_2f9_f9_f6=d29,<g8_19g9_19g8_19g9=d1
  7006. vmlal.s32 q6,d29,d1
  7007. # qhasm: h68[0,1] += f0_2f1_f2_2f3[0] signed* g4_g5_g6_g7[2];h68[2,3] += f0_2f1_f2_2f3[1] signed* g4_g5_g6_g7[3]
  7008. # asm 1: vmlal.s32 <h68=reg128#5,<f0_2f1_f2_2f3=reg128#11%bot,<g4_g5_g6_g7=reg128#4%top
  7009. # asm 2: vmlal.s32 <h68=q4,<f0_2f1_f2_2f3=d20,<g4_g5_g6_g7=d7
  7010. vmlal.s32 q4,d20,d7
  7011. # qhasm: h68[0,1] += f0_2f1_f2_2f3[2] signed* g4_g5_g6_g7[0];h68[2,3] += f0_2f1_f2_2f3[3] signed* g4_g5_g6_g7[1]
  7012. # asm 1: vmlal.s32 <h68=reg128#5,<f0_2f1_f2_2f3=reg128#11%top,<g4_g5_g6_g7=reg128#4%bot
  7013. # asm 2: vmlal.s32 <h68=q4,<f0_2f1_f2_2f3=d21,<g4_g5_g6_g7=d6
  7014. vmlal.s32 q4,d21,d6
  7015. # qhasm: h68[0,1] += f4_2f5_f6_2f7[0] signed* g0_g1_g2_g3[2];h68[2,3] += f4_2f5_f6_2f7[1] signed* g0_g1_g2_g3[3]
  7016. # asm 1: vmlal.s32 <h68=reg128#5,<f4_2f5_f6_2f7=reg128#14%bot,<g0_g1_g2_g3=reg128#3%top
  7017. # asm 2: vmlal.s32 <h68=q4,<f4_2f5_f6_2f7=d26,<g0_g1_g2_g3=d5
  7018. vmlal.s32 q4,d26,d5
  7019. # qhasm: new h97
  7020. # qhasm: h97 = h97[0,1]h75[0,1]
  7021. # asm 1: vext.32 <h97=reg128#6%top,<h75=reg128#7%bot,<h75=reg128#7%bot,#0
  7022. # asm 2: vext.32 <h97=d11,<h75=d12,<h75=d12,#0
  7023. vext.32 d11,d12,d12,#0
  7024. # qhasm: h68[0,1] += f4_2f5_f6_2f7[2] signed* g0_g1_g2_g3[0];h68[2,3] += f4_2f5_f6_2f7[3] signed* g0_g1_g2_g3[1]
  7025. # asm 1: vmlal.s32 <h68=reg128#5,<f4_2f5_f6_2f7=reg128#14%top,<g0_g1_g2_g3=reg128#3%bot
  7026. # asm 2: vmlal.s32 <h68=q4,<f4_2f5_f6_2f7=d27,<g0_g1_g2_g3=d4
  7027. vmlal.s32 q4,d27,d4
  7028. # qhasm: h97 = 0,h97[1]
  7029. # asm 1: vmov.i64 <h97=reg128#6%bot,#0
  7030. # asm 2: vmov.i64 <h97=d10,#0
  7031. vmov.i64 d10,#0
  7032. # qhasm: h68[0,1] += f8_2f9_f9_f6[0] signed* g8_19g9_19g8_19g9[2];h68[2,3] += f8_2f9_f9_f6[1] signed* g8_19g9_19g8_19g9[3]
  7033. # asm 1: vmlal.s32 <h68=reg128#5,<f8_2f9_f9_f6=reg128#15%bot,<g8_19g9_19g8_19g9=reg128#1%top
  7034. # asm 2: vmlal.s32 <h68=q4,<f8_2f9_f9_f6=d28,<g8_19g9_19g8_19g9=d1
  7035. vmlal.s32 q4,d28,d1
  7036. # qhasm: h97[0,1] += f1_f8_f3_f0[0] signed* g8_19g9_19g8_19g9[0];h97[2,3] += f1_f8_f3_f0[1] signed* g8_19g9_19g8_19g9[1]
  7037. # asm 1: vmlal.s32 <h97=reg128#6,<f1_f8_f3_f0=reg128#9%bot,<g8_19g9_19g8_19g9=reg128#1%bot
  7038. # asm 2: vmlal.s32 <h97=q5,<f1_f8_f3_f0=d16,<g8_19g9_19g8_19g9=d0
  7039. vmlal.s32 q5,d16,d0
  7040. # qhasm: playp -= 32
  7041. # asm 1: sub >playp=int32#7,<playp=int32#8,#32
  7042. # asm 2: sub >playp=r6,<playp=r7,#32
  7043. sub r6,r7,#32
# qhasm: h97[0,1] += f1_f8_f3_f0[2] signed* g4_g5_g6_g7[2];h97[2,3] += f1_f8_f3_f0[3] signed* g4_g5_g6_g7[3]
# asm 1: vmlal.s32 <h97=reg128#6,<f1_f8_f3_f0=reg128#9%top,<g4_g5_g6_g7=reg128#4%top
# asm 2: vmlal.s32 <h97=q5,<f1_f8_f3_f0=d17,<g4_g5_g6_g7=d7
vmlal.s32 q5,d17,d7
# qhasm: h97[0,1] += f5_f2_f7_f4[0] signed* g4_g5_g6_g7[0];h97[2,3] += f5_f2_f7_f4[1] signed* g4_g5_g6_g7[1]
# asm 1: vmlal.s32 <h97=reg128#6,<f5_f2_f7_f4=reg128#8%bot,<g4_g5_g6_g7=reg128#4%bot
# asm 2: vmlal.s32 <h97=q5,<f5_f2_f7_f4=d14,<g4_g5_g6_g7=d6
vmlal.s32 q5,d14,d6
# qhasm: new h80
# qhasm: h80 = h68[2,3]h80[2,3]
# asm 1: vext.32 <h80=reg128#16%bot,<h68=reg128#5%top,<h68=reg128#5%bot,#0
# asm 2: vext.32 <h80=d30,<h68=d9,<h68=d8,#0
vext.32 d30,d9,d8,#0
# qhasm: h97[0,1] += f5_f2_f7_f4[2] signed* g0_g1_g2_g3[2];h97[2,3] += f5_f2_f7_f4[3] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h97=reg128#6,<f5_f2_f7_f4=reg128#8%top,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h97=q5,<f5_f2_f7_f4=d15,<g0_g1_g2_g3=d5
vmlal.s32 q5,d15,d5
# qhasm: h80 aligned= h80[0]mem64[playp];playp+=8
# asm 1: vld1.8 {<h80=reg128#16%top},[<playp=int32#7,: 64]!
# asm 2: vld1.8 {<h80=d31},[<playp=r6,: 64]!
vld1.8 {d31},[r6,: 64]!
# qhasm: h97[0,1] += f8_2f9_f9_f6[2] signed* g0_g1_g2_g3[0];h97[2,3] += f8_2f9_f9_f6[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h97=reg128#6,<f8_2f9_f9_f6=reg128#15%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h97=q5,<f8_2f9_f9_f6=d29,<g0_g1_g2_g3=d4
vmlal.s32 q5,d29,d4
# qhasm: h80[0,1] += f0_2f1_f2_2f3[0] signed* g8_19g9_19g8_19g9[0];h80[2,3] += f0_2f1_f2_2f3[1] signed* g8_19g9_19g8_19g9[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f0_2f1_f2_2f3=reg128#11%bot,<g8_19g9_19g8_19g9=reg128#1%bot
# asm 2: vmlal.s32 <h80=q15,<f0_2f1_f2_2f3=d20,<g8_19g9_19g8_19g9=d0
vmlal.s32 q15,d20,d0
# qhasm: new 19g4_g5_19g6_g7
# qhasm: 19g4_g5_19g6_g7 = g4_g5_g6_g7[1]19g4_19g5_19g6_19g7[0]19g4_g5_19g6_g7[2,3]
# asm 1: vext.32 <19g4_g5_19g6_g7=reg128#1%bot,<g4_g5_g6_g7=reg128#4%bot,<19g4_19g5_19g6_19g7=reg128#10%bot,#1
# asm 2: vext.32 <19g4_g5_19g6_g7=d0,<g4_g5_g6_g7=d6,<19g4_19g5_19g6_19g7=d18,#1
vext.32 d0,d6,d18,#1
# qhasm: h80[0,1] += f0_2f1_f2_2f3[2] signed* g4_19g5_g6_19g7[2];h80[2,3] += f0_2f1_f2_2f3[3] signed* g4_19g5_g6_19g7[3]
# asm 1: vmlal.s32 <h80=reg128#16,<f0_2f1_f2_2f3=reg128#11%top,<g4_19g5_g6_19g7=reg128#13%top
# asm 2: vmlal.s32 <h80=q15,<f0_2f1_f2_2f3=d21,<g4_19g5_g6_19g7=d25
vmlal.s32 q15,d21,d25
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[1]19g4_g5_19g6_g7[0]19g4_g5_19g6_g7[2,3]
# asm 1: vrev64.i32 <19g4_g5_19g6_g7=reg128#1%bot,<19g4_g5_19g6_g7=reg128#1%bot
# asm 2: vrev64.i32 <19g4_g5_19g6_g7=d0,<19g4_g5_19g6_g7=d0
vrev64.i32 d0,d0
# qhasm: h80[0,1] += f4_2f5_f6_2f7[0] signed* g4_19g5_g6_19g7[0];h80[2,3] += f4_2f5_f6_2f7[1] signed* g4_19g5_g6_19g7[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f4_2f5_f6_2f7=reg128#14%bot,<g4_19g5_g6_19g7=reg128#13%bot
# asm 2: vmlal.s32 <h80=q15,<f4_2f5_f6_2f7=d26,<g4_19g5_g6_19g7=d24
vmlal.s32 q15,d26,d24
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[0,1]g4_g5_g6_g7[3]19g4_19g5_19g6_19g7[2]
# asm 1: vext.32 <19g4_g5_19g6_g7=reg128#1%top,<g4_g5_g6_g7=reg128#4%top,<19g4_19g5_19g6_19g7=reg128#10%top,#1
# asm 2: vext.32 <19g4_g5_19g6_g7=d1,<g4_g5_g6_g7=d7,<19g4_19g5_19g6_19g7=d19,#1
vext.32 d1,d7,d19,#1
# qhasm: new h19
# qhasm: h19 = h19[0,1]h97[0,1]
# asm 1: vext.32 <h19=reg128#4%top,<h97=reg128#6%bot,<h97=reg128#6%bot,#0
# asm 2: vext.32 <h19=d7,<h97=d10,<h97=d10,#0
vext.32 d7,d10,d10,#0
# qhasm: h80[0,1] += f4_2f5_f6_2f7[2] signed* g0_19g1_g2_19g3[2];h80[2,3] += f4_2f5_f6_2f7[3] signed* g0_19g1_g2_19g3[3]
# asm 1: vmlal.s32 <h80=reg128#16,<f4_2f5_f6_2f7=reg128#14%top,<g0_19g1_g2_19g3=reg128#12%top
# asm 2: vmlal.s32 <h80=q15,<f4_2f5_f6_2f7=d27,<g0_19g1_g2_19g3=d23
vmlal.s32 q15,d27,d23
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[0,1]19g4_g5_19g6_g7[3]19g4_g5_19g6_g7[2]
# asm 1: vrev64.i32 <19g4_g5_19g6_g7=reg128#1%top,<19g4_g5_19g6_g7=reg128#1%top
# asm 2: vrev64.i32 <19g4_g5_19g6_g7=d1,<19g4_g5_19g6_g7=d1
vrev64.i32 d1,d1
# qhasm: h19 aligned= mem64[playp]h19[1]
# asm 1: vld1.8 {<h19=reg128#4%bot},[<playp=int32#7,: 64]
# asm 2: vld1.8 {<h19=d6},[<playp=r6,: 64]
vld1.8 {d6},[r6,: 64]
# qhasm: h80[0,1] += f8_2f9_f9_f6[0] signed* g0_19g1_g2_19g3[0];h80[2,3] += f8_2f9_f9_f6[1] signed* g0_19g1_g2_19g3[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f8_2f9_f9_f6=reg128#15%bot,<g0_19g1_g2_19g3=reg128#12%bot
# asm 2: vmlal.s32 <h80=q15,<f8_2f9_f9_f6=d28,<g0_19g1_g2_19g3=d22
vmlal.s32 q15,d28,d22
# qhasm: h19[0,1] += f1_f8_f3_f0[0] signed* g0_g1_g2_g3[0];h19[2,3] += f1_f8_f3_f0[1] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f1_f8_f3_f0=reg128#9%bot,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h19=q3,<f1_f8_f3_f0=d16,<g0_g1_g2_g3=d4
vmlal.s32 q3,d16,d4
# qhasm: playp+=24
# asm 1: add >playp=int32#7,<playp=int32#7,#24
# asm 2: add >playp=r6,<playp=r6,#24
add r6,r6,#24
# qhasm: h19[0,1] += f1_f8_f3_f0[2] signed* 19g8_g9_19g2_g3[0];h19[2,3] += f1_f8_f3_f0[3] signed* 19g8_g9_19g2_g3[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f1_f8_f3_f0=reg128#9%top,<19g8_g9_19g2_g3=reg128#2%bot
# asm 2: vmlal.s32 <h19=q3,<f1_f8_f3_f0=d17,<19g8_g9_19g2_g3=d2
vmlal.s32 q3,d17,d2
# qhasm: new h04
# qhasm: h04 = h80[2,3]h04[2,3]
# asm 1: vext.32 <h04=reg128#3%bot,<h80=reg128#16%top,<h80=reg128#16%bot,#0
# asm 2: vext.32 <h04=d4,<h80=d31,<h80=d30,#0
vext.32 d4,d31,d30,#0
# qhasm: new h37
# qhasm: h37 = h37[0]h97[1]
# asm 1: vmov <h37=reg128#9%top,<h97=reg128#6%top
# asm 2: vmov <h37=d17,<h97=d11
vmov d17,d11
# qhasm: h19[0,1] += f5_f2_f7_f4[0] signed* 19g4_g5_19g6_g7[2];h19[2,3] += f5_f2_f7_f4[1] signed* 19g4_g5_19g6_g7[3]
# asm 1: vmlal.s32 <h19=reg128#4,<f5_f2_f7_f4=reg128#8%bot,<19g4_g5_19g6_g7=reg128#1%top
# asm 2: vmlal.s32 <h19=q3,<f5_f2_f7_f4=d14,<19g4_g5_19g6_g7=d1
vmlal.s32 q3,d14,d1
# qhasm: new h15
# qhasm: h15 = h15[0,1]h75[2,3]
# asm 1: vext.32 <h15=reg128#6%top,<h75=reg128#7%top,<h75=reg128#7%top,#0
# asm 2: vext.32 <h15=d11,<h75=d13,<h75=d13,#0
vext.32 d11,d13,d13,#0
# qhasm: new h48
# qhasm: h48 = h48[0,1]h80[0,1]
# asm 1: vext.32 <h48=reg128#7%top,<h80=reg128#16%bot,<h80=reg128#16%bot,#0
# asm 2: vext.32 <h48=d13,<h80=d30,<h80=d30,#0
vext.32 d13,d30,d30,#0
# qhasm: h19[0,1] += f5_f2_f7_f4[2] signed* 19g4_g5_19g6_g7[0];h19[2,3] += f5_f2_f7_f4[3] signed* 19g4_g5_19g6_g7[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f5_f2_f7_f4=reg128#8%top,<19g4_g5_19g6_g7=reg128#1%bot
# asm 2: vmlal.s32 <h19=q3,<f5_f2_f7_f4=d15,<19g4_g5_19g6_g7=d0
vmlal.s32 q3,d15,d0
# qhasm: new h26
# qhasm: h26 = h26[0,1]h68[0,1]
# asm 1: vext.32 <h26=reg128#1%top,<h68=reg128#5%bot,<h68=reg128#5%bot,#0
# asm 2: vext.32 <h26=d1,<h68=d8,<h68=d8,#0
vext.32 d1,d8,d8,#0
# qhasm: h19[0,1] += f8_2f9_f9_f6[2] signed* 19g8_g9_19g2_g3[2];h19[2,3] += f8_2f9_f9_f6[3] signed* 19g8_g9_19g2_g3[3]
# asm 1: vmlal.s32 <h19=reg128#4,<f8_2f9_f9_f6=reg128#15%top,<19g8_g9_19g2_g3=reg128#2%top
# asm 2: vmlal.s32 <h19=q3,<f8_2f9_f9_f6=d29,<19g8_g9_19g2_g3=d3
vmlal.s32 q3,d29,d3
# qhasm: h04 aligned= h04[0]mem64[playp]
# asm 1: vld1.8 {<h04=reg128#3%top},[<playp=int32#7,: 64]
# asm 2: vld1.8 {<h04=d5},[<playp=r6,: 64]
vld1.8 {d5},[r6,: 64]
# qhasm: playp -= 16
# asm 1: sub >playp=int32#7,<playp=int32#7,#16
# asm 2: sub >playp=r6,<playp=r6,#16
sub r6,r6,#16
# qhasm: h15 = h19[0,1]h15[2,3]
# asm 1: vext.32 <h15=reg128#6%bot,<h19=reg128#4%bot,<h19=reg128#4%bot,#0
# asm 2: vext.32 <h15=d10,<h19=d6,<h19=d6,#0
vext.32 d10,d6,d6,#0
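# Editor's note (hand-written, not qhasm output): the block below is the
# carry chain for the radix-2^25.5 representation of GF(2^255-19), in
# which even limbs hold 26 bits and odd limbs 25 bits.  Each step is a
# rounded signed carry; a minimal C sketch of one 26-bit step, with
# hypothetical names (h is an int64_t[10]):
#
#     int64_t t = h[i] + (INT64_C(1) << 25);    /* _0x2000000 */
#     int64_t c = t >> 26;                      /* vshr.s64 #26 */
#     h[i]   -= t & ~((INT64_C(1) << 26) - 1);  /* vand mask26; vsub */
#     h[i+1] += c;                              /* vadd.i64 */
#
# mask26/mask25 keep only the bits at and above the carry position, so
# "h -= t & mask" subtracts exactly c << 26 (resp. c << 25); the 25-bit
# steps use _0x1000000 = 2^24 and shift by 25 instead.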
# qhasm: 4x mask26 = 0xffffffff
# asm 1: vmov.i32 >mask26=reg128#2,#0xffffffff
# asm 2: vmov.i32 >mask26=q1,#0xffffffff
vmov.i32 q1,#0xffffffff
# qhasm: 2x mask25 = mask26 << 25
# asm 1: vshl.i64 >mask25=reg128#5,<mask26=reg128#2,#25
# asm 2: vshl.i64 >mask25=q4,<mask26=q1,#25
vshl.i64 q4,q1,#25
# qhasm: ptr = &_0x2000000_stack
# asm 1: lea >ptr=int32#8,<_0x2000000_stack=stack128#1
# asm 2: lea >ptr=r7,<_0x2000000_stack=[sp,#512]
add r7,sp,#512
# qhasm: _0x2000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x2000000=reg128#8%bot->_0x2000000=reg128#8%top},[<ptr=int32#8,: 128]
# asm 2: vld1.8 {>_0x2000000=d14->_0x2000000=d15},[<ptr=r7,: 128]
vld1.8 {d14-d15},[r7,: 128]
# qhasm: 2x t0 = h04 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#10,<h04=reg128#3,<_0x2000000=reg128#8
# asm 2: vadd.i64 >t0=q9,<h04=q2,<_0x2000000=q7
vadd.i64 q9,q2,q7
# qhasm: 2x mask26 <<= 26
# asm 1: vshl.i64 >mask26=reg128#2,<mask26=reg128#2,#26
# asm 2: vshl.i64 >mask26=q1,<mask26=q1,#26
vshl.i64 q1,q1,#26
# qhasm: 2x c = t0 signed>> 26
# asm 1: vshr.s64 >c=reg128#11,<t0=reg128#10,#26
# asm 2: vshr.s64 >c=q10,<t0=q9,#26
vshr.s64 q10,q9,#26
# qhasm: h26 aligned= mem64[playp]h26[1];playp += 8
# asm 1: vld1.8 {<h26=reg128#1%bot},[<playp=int32#7,: 64]!
# asm 2: vld1.8 {<h26=d0},[<playp=r6,: 64]!
vld1.8 {d0},[r6,: 64]!
# qhasm: 2x h15 += c
# asm 1: vadd.i64 >h15=reg128#6,<h15=reg128#6,<c=reg128#11
# asm 2: vadd.i64 >h15=q5,<h15=q5,<c=q10
vadd.i64 q5,q5,q10
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#10,<t0=reg128#10,<mask26=reg128#2
# asm 2: vand >t0=q9,<t0=q9,<mask26=q1
vand q9,q9,q1
# qhasm: h37 aligned= mem64[playp]h37[1];playp += 8
# asm 1: vld1.8 {<h37=reg128#9%bot},[<playp=int32#7,: 64]!
# asm 2: vld1.8 {<h37=d16},[<playp=r6,: 64]!
vld1.8 {d16},[r6,: 64]!
# qhasm: ptr = &_0x1000000_stack
# asm 1: lea >ptr=int32#7,<_0x1000000_stack=stack128#2
# asm 2: lea >ptr=r6,<_0x1000000_stack=[sp,#528]
add r6,sp,#528
# qhasm: _0x1000000 aligned= mem128[ptr]
# asm 1: vld1.8 {>_0x1000000=reg128#11%bot->_0x1000000=reg128#11%top},[<ptr=int32#7,: 128]
# asm 2: vld1.8 {>_0x1000000=d20->_0x1000000=d21},[<ptr=r6,: 128]
vld1.8 {d20-d21},[r6,: 128]
# qhasm: 2x t1 = h15 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#12,<h15=reg128#6,<_0x1000000=reg128#11
# asm 2: vadd.i64 >t1=q11,<h15=q5,<_0x1000000=q10
vadd.i64 q11,q5,q10
# qhasm: 2x h04 -= t0
# asm 1: vsub.i64 >h04=reg128#3,<h04=reg128#3,<t0=reg128#10
# asm 2: vsub.i64 >h04=q2,<h04=q2,<t0=q9
vsub.i64 q2,q2,q9
# qhasm: 2x c = t1 signed>> 25
# asm 1: vshr.s64 >c=reg128#10,<t1=reg128#12,#25
# asm 2: vshr.s64 >c=q9,<t1=q11,#25
vshr.s64 q9,q11,#25
# qhasm: h48 = h04[2,3]h48[2,3]
# asm 1: vext.32 <h48=reg128#7%bot,<h04=reg128#3%top,<h04=reg128#3%bot,#0
# asm 2: vext.32 <h48=d12,<h04=d5,<h04=d4,#0
vext.32 d12,d5,d4,#0
# qhasm: t1 &= mask25
# asm 1: vand >t1=reg128#12,<t1=reg128#12,<mask25=reg128#5
# asm 2: vand >t1=q11,<t1=q11,<mask25=q4
vand q11,q11,q4
# qhasm: 2x h26 += c
# asm 1: vadd.i64 >h26=reg128#1,<h26=reg128#1,<c=reg128#10
# asm 2: vadd.i64 >h26=q0,<h26=q0,<c=q9
vadd.i64 q0,q0,q9
# qhasm: new h59
# qhasm: h59 = h59[0]h19[1]
# asm 1: vmov <h59=reg128#10%top,<h19=reg128#4%top
# asm 2: vmov <h59=d19,<h19=d7
vmov d19,d7
# qhasm: 2x t0 = h26 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#4,<h26=reg128#1,<_0x2000000=reg128#8
# asm 2: vadd.i64 >t0=q3,<h26=q0,<_0x2000000=q7
vadd.i64 q3,q0,q7
# qhasm: 2x h15 -= t1
# asm 1: vsub.i64 >h15=reg128#6,<h15=reg128#6,<t1=reg128#12
# asm 2: vsub.i64 >h15=q5,<h15=q5,<t1=q11
vsub.i64 q5,q5,q11
# qhasm: 2x c = t0 signed>> 26
# asm 1: vshr.s64 >c=reg128#12,<t0=reg128#4,#26
# asm 2: vshr.s64 >c=q11,<t0=q3,#26
vshr.s64 q11,q3,#26
# qhasm: h59 = h15[2,3]h59[2,3]
# asm 1: vext.32 <h59=reg128#10%bot,<h15=reg128#6%top,<h15=reg128#6%bot,#0
# asm 2: vext.32 <h59=d18,<h15=d11,<h15=d10,#0
vext.32 d18,d11,d10,#0
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#4,<t0=reg128#4,<mask26=reg128#2
# asm 2: vand >t0=q3,<t0=q3,<mask26=q1
vand q3,q3,q1
# qhasm: 2x h37 += c
# asm 1: vadd.i64 >h37=reg128#9,<h37=reg128#9,<c=reg128#12
# asm 2: vadd.i64 >h37=q8,<h37=q8,<c=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = h37 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#12,<h37=reg128#9,<_0x1000000=reg128#11
# asm 2: vadd.i64 >t1=q11,<h37=q8,<_0x1000000=q10
vadd.i64 q11,q8,q10
# qhasm: 2x h26 -= t0
# asm 1: vsub.i64 >h26=reg128#1,<h26=reg128#1,<t0=reg128#4
# asm 2: vsub.i64 >h26=q0,<h26=q0,<t0=q3
vsub.i64 q0,q0,q3
# qhasm: 2x c = t1 signed>> 25
# asm 1: vshr.s64 >c=reg128#4,<t1=reg128#12,#25
# asm 2: vshr.s64 >c=q3,<t1=q11,#25
vshr.s64 q3,q11,#25
# qhasm: t1 &= mask25
# asm 1: vand >t1=reg128#12,<t1=reg128#12,<mask25=reg128#5
# asm 2: vand >t1=q11,<t1=q11,<mask25=q4
vand q11,q11,q4
# qhasm: 2x h48 += c
# asm 1: vadd.i64 >h48=reg128#4,<h48=reg128#7,<c=reg128#4
# asm 2: vadd.i64 >h48=q3,<h48=q6,<c=q3
vadd.i64 q3,q6,q3
# qhasm: 2x t0 = h48 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#7,<h48=reg128#4,<_0x2000000=reg128#8
# asm 2: vadd.i64 >t0=q6,<h48=q3,<_0x2000000=q7
vadd.i64 q6,q3,q7
# qhasm: 2x h37 -= t1
# asm 1: vsub.i64 >h37=reg128#9,<h37=reg128#9,<t1=reg128#12
# asm 2: vsub.i64 >h37=q8,<h37=q8,<t1=q11
vsub.i64 q8,q8,q11
# qhasm: 2x c = t0 signed>> 26
# asm 1: vshr.s64 >c=reg128#12,<t0=reg128#7,#26
# asm 2: vshr.s64 >c=q11,<t0=q6,#26
vshr.s64 q11,q6,#26
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#7,<t0=reg128#7,<mask26=reg128#2
# asm 2: vand >t0=q6,<t0=q6,<mask26=q1
vand q6,q6,q1
# qhasm: 2x h59 += c
# asm 1: vadd.i64 >h59=reg128#10,<h59=reg128#10,<c=reg128#12
# asm 2: vadd.i64 >h59=q9,<h59=q9,<c=q11
vadd.i64 q9,q9,q11
# qhasm: new t
# qhasm: t = t[0], h59[1] + _0x1000000[1]
# asm 1: vadd.i64 <t=reg128#13%top,<h59=reg128#10%top,<_0x1000000=reg128#11%top
# asm 2: vadd.i64 <t=d25,<h59=d19,<_0x1000000=d21
vadd.i64 d25,d19,d21
# qhasm: 2x h48 -= t0
# asm 1: vsub.i64 >h48=reg128#4,<h48=reg128#4,<t0=reg128#7
# asm 2: vsub.i64 >h48=q3,<h48=q3,<t0=q6
vsub.i64 q3,q3,q6
# qhasm: c = c[0],t[1] signed>> 25
# asm 1: vshr.s64 <c=reg128#12%top,<t=reg128#13%top,#25
# asm 2: vshr.s64 <c=d23,<t=d25,#25
vshr.s64 d23,d25,#25
# qhasm: t &= mask25
# asm 1: vand >t=reg128#5,<t=reg128#13,<mask25=reg128#5
# asm 2: vand >t=q4,<t=q12,<mask25=q4
vand q4,q12,q4
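# Editor's note (hand-written, not qhasm output): the carry c leaving the
# top limb h9 re-enters at h0 multiplied by 19, since 2^255 = 19 modulo
# 2^255 - 19.  19c is assembled below from shifts and adds only:
# s2 = 2c, s = c << 4 (16c), s2 += c (3c), s += s2 (19c), and the result
# is finally added into h0.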
# qhasm: new s2
# qhasm: s2 = s2[0],c[1] + c[1]
# asm 1: vadd.i64 <s2=reg128#11%top,<c=reg128#12%top,<c=reg128#12%top
# asm 2: vadd.i64 <s2=d21,<c=d23,<c=d23
vadd.i64 d21,d23,d23
# qhasm: new s
# qhasm: s = s[0],c[1] << 4
# asm 1: vshl.i64 <s=reg128#13%top,<c=reg128#12%top,#4
# asm 2: vshl.i64 <s=d25,<c=d23,#4
vshl.i64 d25,d23,#4
# qhasm: s2 = s2[0],s2[1] + c[1]
# asm 1: vadd.i64 <s2=reg128#11%top,<s2=reg128#11%top,<c=reg128#12%top
# asm 2: vadd.i64 <s2=d21,<s2=d21,<c=d23
vadd.i64 d21,d21,d23
# qhasm: s = s[0],s[1] + s2[1]
# asm 1: vadd.i64 <s=reg128#13%top,<s=reg128#13%top,<s2=reg128#11%top
# asm 2: vadd.i64 <s=d25,<s=d25,<s2=d21
vadd.i64 d25,d25,d21
# qhasm: h04 = h04[0] + s[1],h04[1]
# asm 1: vadd.i64 <h04=reg128#3%bot,<h04=reg128#3%bot,<s=reg128#13%top
# asm 2: vadd.i64 <h04=d4,<h04=d4,<s=d25
vadd.i64 d4,d4,d25
# qhasm: h26[0,1,2,3] h37[0,1,2,3] = h26[0]h37[0]h26[1]h37[1] h26[2]h37[2]h26[3]h37[3]
# asm 1: vzip.i32 <h26=reg128#1,<h37=reg128#9
# asm 2: vzip.i32 <h26=q0,<h37=q8
vzip.i32 q0,q8
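# Editor's note (hand-written, not qhasm output): vzip re-interleaves the
# paired 64-bit accumulators (h2 with h3 here, then h4/h5, h8/h9 and
# h0/h1 below) so the low 32 bits of two consecutive reduced limbs sit
# in one doubleword, ready for the 64-bit stores to posh.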
# qhasm: t0 = h04[0] + _0x2000000[0],t0[1]
# asm 1: vadd.i64 <t0=reg128#7%bot,<h04=reg128#3%bot,<_0x2000000=reg128#8%bot
# asm 2: vadd.i64 <t0=d12,<h04=d4,<_0x2000000=d14
vadd.i64 d12,d4,d14
# qhasm: posh += 8
# asm 1: add >posh=int32#7,<posh=int32#9,#8
# asm 2: add >posh=r6,<posh=r8,#8
add r6,r8,#8
# qhasm: mem64[posh] aligned= h26[0]
# asm 1: vst1.8 <h26=reg128#1%bot,[<posh=int32#7,: 64]
# asm 2: vst1.8 <h26=d0,[<posh=r6,: 64]
vst1.8 d0,[r6,: 64]
# qhasm: h59 = h59[0],h59[1] - t[1]
# asm 1: vsub.i64 <h59=reg128#10%top,<h59=reg128#10%top,<t=reg128#5%top
# asm 2: vsub.i64 <h59=d19,<h59=d19,<t=d9
vsub.i64 d19,d19,d9
# qhasm: posh += 16
# asm 1: add >posh=int32#7,<posh=int32#7,#16
# asm 2: add >posh=r6,<posh=r6,#16
add r6,r6,#16
# qhasm: mem64[posh] aligned= h37[0]
# asm 1: vst1.8 <h37=reg128#9%bot,[<posh=int32#7,: 64]
# asm 2: vst1.8 <h37=d16,[<posh=r6,: 64]
vst1.8 d16,[r6,: 64]
# qhasm: c = t0[0] signed>> 26,c[1]
# asm 1: vshr.s64 <c=reg128#12%bot,<t0=reg128#7%bot,#26
# asm 2: vshr.s64 <c=d22,<t0=d12,#26
vshr.s64 d22,d12,#26
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#1,<t0=reg128#7,<mask26=reg128#2
# asm 2: vand >t0=q0,<t0=q6,<mask26=q1
vand q0,q6,q1
# qhasm: h15 = h15[0] + c[0],h15[1]
# asm 1: vadd.i64 <h15=reg128#6%bot,<h15=reg128#6%bot,<c=reg128#12%bot
# asm 2: vadd.i64 <h15=d10,<h15=d10,<c=d22
vadd.i64 d10,d10,d22
# qhasm: h48[0,1,2,3] h59[0,1,2,3] = h48[0]h59[0]h48[1]h59[1] h48[2]h59[2]h48[3]h59[3]
# asm 1: vzip.i32 <h48=reg128#4,<h59=reg128#10
# asm 2: vzip.i32 <h48=q3,<h59=q9
vzip.i32 q3,q9
# qhasm: h04 = h04[0] - t0[0],h04[1]
# asm 1: vsub.i64 <h04=reg128#3%bot,<h04=reg128#3%bot,<t0=reg128#1%bot
# asm 2: vsub.i64 <h04=d4,<h04=d4,<t0=d0
vsub.i64 d4,d4,d0
# qhasm: posh -= 8
# asm 1: sub >posh=int32#7,<posh=int32#7,#8
# asm 2: sub >posh=r6,<posh=r6,#8
sub r6,r6,#8
# qhasm: mem64[posh] aligned= h48[0]
# asm 1: vst1.8 <h48=reg128#4%bot,[<posh=int32#7,: 64]
# asm 2: vst1.8 <h48=d6,[<posh=r6,: 64]
vst1.8 d6,[r6,: 64]
# qhasm: posh += 16
# asm 1: add >posh=int32#7,<posh=int32#7,#16
# asm 2: add >posh=r6,<posh=r6,#16
add r6,r6,#16
# qhasm: mem64[posh] aligned= h59[0]
# asm 1: vst1.8 <h59=reg128#10%bot,[<posh=int32#7,: 64]
# asm 2: vst1.8 <h59=d18,[<posh=r6,: 64]
vst1.8 d18,[r6,: 64]
# qhasm: h04[0,1,2,3] h15[0,1,2,3] = h04[0]h15[0]h04[1]h15[1] h04[2]h15[2]h04[3]h15[3]
# asm 1: vzip.i32 <h04=reg128#3,<h15=reg128#6
# asm 2: vzip.i32 <h04=q2,<h15=q5
vzip.i32 q2,q5
# qhasm: posh -= 32
# asm 1: sub >posh=int32#7,<posh=int32#7,#32
# asm 2: sub >posh=r6,<posh=r6,#32
sub r6,r6,#32
# qhasm: mem64[posh] aligned= h04[0]
# asm 1: vst1.8 <h04=reg128#3%bot,[<posh=int32#7,: 64]
# asm 2: vst1.8 <h04=d4,[<posh=r6,: 64]
vst1.8 d4,[r6,: 64]
# qhasm: unsigned>? j -= 1
# asm 1: subs >j=int32#6,<j=int32#6,#1
# asm 2: subs >j=r5,<j=r5,#1
subs r5,r5,#1
# qhasm: goto squaringloop if unsigned>
bhi ._squaringloop
# qhasm: skipsquaringloop:
._skipsquaringloop:
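# Editor's note (hand-written, not qhasm output): everything from
# skipsquaringloop down to the next carry chain computes one more
# product h = f*g over the 10-limb representation, with f read from
# mulsource and g from the playground1 area, using the same
# vmull.s32/vmlal.s32 schedule as the loop body above.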
# qhasm: posf = mulsource
# asm 1: mov >posf=int32#3,<mulsource=int32#3
# asm 2: mov >posf=r2,<mulsource=r2
mov r2,r2
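# Editor's note (hand-written, not qhasm output): the mov above is a
# no-op; qhasm happened to allocate posf and mulsource to the same
# machine register (r2), and the register-to-register copy survives in
# the generated output.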
# qhasm: posg = playground1_ptr + 288
# asm 1: add >posg=int32#6,<playground1_ptr=int32#4,#288
# asm 2: add >posg=r5,<playground1_ptr=r3,#288
add r5,r3,#288
# qhasm: posh = playground1_ptr + 144
# asm 1: add >posh=int32#7,<playground1_ptr=int32#4,#144
# asm 2: add >posh=r6,<playground1_ptr=r3,#144
add r6,r3,#144
# qhasm: 4x _19_19_19_19 = 19
# asm 1: vmov.i32 >_19_19_19_19=reg128#1,#19
# asm 2: vmov.i32 >_19_19_19_19=q0,#19
vmov.i32 q0,#19
# qhasm: 4x _0_1_0_1 = 0
# asm 1: vmov.i32 >_0_1_0_1=reg128#2,#0
# asm 2: vmov.i32 >_0_1_0_1=q1,#0
vmov.i32 q1,#0
# qhasm: 4x _1_1_1_1 = 1
# asm 1: vmov.i32 >_1_1_1_1=reg128#3,#1
# asm 2: vmov.i32 >_1_1_1_1=q2,#1
vmov.i32 q2,#1
# qhasm: _0_1_0_1[0,1,2,3] _1_1_1_1[0,1,2,3] = _0_1_0_1[0]_1_1_1_1[0]_0_1_0_1[1]_1_1_1_1[1] _0_1_0_1[2]_1_1_1_1[2]_0_1_0_1[3]_1_1_1_1[3]
# asm 1: vzip.i32 <_0_1_0_1=reg128#2,<_1_1_1_1=reg128#3
# asm 2: vzip.i32 <_0_1_0_1=q1,<_1_1_1_1=q2
vzip.i32 q1,q2
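# Editor's note (hand-written, not qhasm output): after this vzip both
# constant registers hold (0,1,0,1); the per-lane vshl.u32 by this
# vector below doubles the odd limbs of f, producing f0,2f1,f2,2f3,...
# The doubling absorbs the factor of 2 that cross products of two odd
# (25-bit) limb positions pick up in radix 2^25.5.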
# qhasm: g0_g1_g2_g3 aligned= mem128[posg];posg+=16
# asm 1: vld1.8 {>g0_g1_g2_g3=reg128#3%bot->g0_g1_g2_g3=reg128#3%top},[<posg=int32#6,: 128]!
# asm 2: vld1.8 {>g0_g1_g2_g3=d4->g0_g1_g2_g3=d5},[<posg=r5,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
# qhasm: g4_g5_g6_g7 aligned= mem128[posg];posg+=16
# asm 1: vld1.8 {>g4_g5_g6_g7=reg128#4%bot->g4_g5_g6_g7=reg128#4%top},[<posg=int32#6,: 128]!
# asm 2: vld1.8 {>g4_g5_g6_g7=d6->g4_g5_g6_g7=d7},[<posg=r5,: 128]!
vld1.8 {d6-d7},[r5,: 128]!
# qhasm: new f8_f9_g8_g9
# qhasm: f8_f9_g8_g9 aligned= f8_f9_g8_g9[0]mem64[posg]
# asm 1: vld1.8 {<f8_f9_g8_g9=reg128#5%top},[<posg=int32#6,: 64]
# asm 2: vld1.8 {<f8_f9_g8_g9=d9},[<posg=r5,: 64]
vld1.8 {d9},[r5,: 64]
# qhasm: f0_f1_f2_f3 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>f0_f1_f2_f3=reg128#6%bot->f0_f1_f2_f3=reg128#6%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f0_f1_f2_f3=d10->f0_f1_f2_f3=d11},[<posf=r2,: 128]!
vld1.8 {d10-d11},[r2,: 128]!
# qhasm: playp = &playground2
# asm 1: lea >playp=int32#6,<playground2=stack512#1
# asm 2: lea >playp=r5,<playground2=[sp,#416]
add r5,sp,#416
# qhasm: f4_f5_f6_f7 aligned= mem128[posf];posf+=16
# asm 1: vld1.8 {>f4_f5_f6_f7=reg128#7%bot->f4_f5_f6_f7=reg128#7%top},[<posf=int32#3,: 128]!
# asm 2: vld1.8 {>f4_f5_f6_f7=d12->f4_f5_f6_f7=d13},[<posf=r2,: 128]!
vld1.8 {d12-d13},[r2,: 128]!
# qhasm: 4x 19g0_19g1_19g2_19g3 = g0_g1_g2_g3 * _19_19_19_19
# asm 1: vmul.i32 >19g0_19g1_19g2_19g3=reg128#8,<g0_g1_g2_g3=reg128#3,<_19_19_19_19=reg128#1
# asm 2: vmul.i32 >19g0_19g1_19g2_19g3=q7,<g0_g1_g2_g3=q2,<_19_19_19_19=q0
vmul.i32 q7,q2,q0
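# Editor's note (hand-written, not qhasm output): 19*g_i is precomputed
# because limb products that spill past position 9 wrap around with a
# factor of 19 (2^255 = 19 mod 2^255 - 19); the shuffles below build
# mixed vectors such as g0_19g1_g2_19g3 so a single vmlal can cover
# both the wrapped and unwrapped cases.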
# qhasm: f8_f9_g8_g9 aligned= mem64[posf]f8_f9_g8_g9[1]
# asm 1: vld1.8 {<f8_f9_g8_g9=reg128#5%bot},[<posf=int32#3,: 64]
# asm 2: vld1.8 {<f8_f9_g8_g9=d8},[<posf=r2,: 64]
vld1.8 {d8},[r2,: 64]
# qhasm: new f1_f8_f3_f0
# qhasm: f1_f8_f3_f0 = f1_f8_f3_f0[0,1]f0_f1_f2_f3[3]f0_f1_f2_f3[0]
# asm 1: vext.32 <f1_f8_f3_f0=reg128#9%top,<f0_f1_f2_f3=reg128#6%top,<f0_f1_f2_f3=reg128#6%bot,#1
# asm 2: vext.32 <f1_f8_f3_f0=d17,<f0_f1_f2_f3=d11,<f0_f1_f2_f3=d10,#1
vext.32 d17,d11,d10,#1
# qhasm: 4x 19g4_19g5_19g6_19g7 = g4_g5_g6_g7 * _19_19_19_19
# asm 1: vmul.i32 >19g4_19g5_19g6_19g7=reg128#10,<g4_g5_g6_g7=reg128#4,<_19_19_19_19=reg128#1
# asm 2: vmul.i32 >19g4_19g5_19g6_19g7=q9,<g4_g5_g6_g7=q3,<_19_19_19_19=q0
vmul.i32 q9,q3,q0
# qhasm: f1_f8_f3_f0 = f0_f1_f2_f3[1]f8_f9_g8_g9[0]f1_f8_f3_f0[2,3]
# asm 1: vext.32 <f1_f8_f3_f0=reg128#9%bot,<f0_f1_f2_f3=reg128#6%bot,<f8_f9_g8_g9=reg128#5%bot,#1
# asm 2: vext.32 <f1_f8_f3_f0=d16,<f0_f1_f2_f3=d10,<f8_f9_g8_g9=d8,#1
vext.32 d16,d10,d8,#1
# qhasm: 4x f0_2f1_f2_2f3 = f0_f1_f2_f3 << _0_1_0_1
# asm 1: vshl.u32 >f0_2f1_f2_2f3=reg128#11,<f0_f1_f2_f3=reg128#6,<_0_1_0_1=reg128#2
# asm 2: vshl.u32 >f0_2f1_f2_2f3=q10,<f0_f1_f2_f3=q5,<_0_1_0_1=q1
vshl.u32 q10,q5,q1
# qhasm: new g0_19g1_g2_19g3
# qhasm: g0_19g1_g2_19g3 = 19g0_19g1_19g2_19g3[1]g0_g1_g2_g3[0]g0_19g1_g2_19g3[2,3]
# asm 1: vext.32 <g0_19g1_g2_19g3=reg128#12%bot,<19g0_19g1_19g2_19g3=reg128#8%bot,<g0_g1_g2_g3=reg128#3%bot,#1
# asm 2: vext.32 <g0_19g1_g2_19g3=d22,<19g0_19g1_19g2_19g3=d14,<g0_g1_g2_g3=d4,#1
vext.32 d22,d14,d4,#1
# qhasm: new g4_19g5_g6_19g7
# qhasm: g4_19g5_g6_19g7 = 19g4_19g5_19g6_19g7[1]g4_g5_g6_g7[0]g4_19g5_g6_19g7[2,3]
# asm 1: vext.32 <g4_19g5_g6_19g7=reg128#13%bot,<19g4_19g5_19g6_19g7=reg128#10%bot,<g4_g5_g6_g7=reg128#4%bot,#1
# asm 2: vext.32 <g4_19g5_g6_19g7=d24,<19g4_19g5_19g6_19g7=d18,<g4_g5_g6_g7=d6,#1
vext.32 d24,d18,d6,#1
# qhasm: 4x f4_2f5_f6_2f7 = f4_f5_f6_f7 << _0_1_0_1
# asm 1: vshl.u32 >f4_2f5_f6_2f7=reg128#14,<f4_f5_f6_f7=reg128#7,<_0_1_0_1=reg128#2
# asm 2: vshl.u32 >f4_2f5_f6_2f7=q13,<f4_f5_f6_f7=q6,<_0_1_0_1=q1
vshl.u32 q13,q6,q1
# qhasm: new f8_2f9_f9_f6
# qhasm: f8_2f9_f9_f6 = f8_f9_g8_g9[0] << _0_1_0_1[0],f8_f9_g8_g9[1] << _0_1_0_1[1],f8_2f9_f9_f6[2,3]
# asm 1: vshl.u32 <f8_2f9_f9_f6=reg128#15%bot,<f8_f9_g8_g9=reg128#5%bot,<_0_1_0_1=reg128#2%bot
# asm 2: vshl.u32 <f8_2f9_f9_f6=d28,<f8_f9_g8_g9=d8,<_0_1_0_1=d2
vshl.u32 d28,d8,d2
# qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[1]g0_19g1_g2_19g3[0]g0_19g1_g2_19g3[2,3]
# asm 1: vrev64.i32 <g0_19g1_g2_19g3=reg128#12%bot,<g0_19g1_g2_19g3=reg128#12%bot
# asm 2: vrev64.i32 <g0_19g1_g2_19g3=d22,<g0_19g1_g2_19g3=d22
vrev64.i32 d22,d22
# qhasm: g8_19g9_19g8_19g9[0,1] = g8_19g9_19g8_19g9[0,1];g8_19g9_19g8_19g9[2] = f8_f9_g8_g9[2] * _19_19_19_19[2];g8_19g9_19g8_19g9[3] = f8_f9_g8_g9[3] * _19_19_19_19[3]
# asm 1: vmul.i32 >g8_19g9_19g8_19g9=reg128#1%top,<f8_f9_g8_g9=reg128#5%top,<_19_19_19_19=reg128#1%top
# asm 2: vmul.i32 >g8_19g9_19g8_19g9=d1,<f8_f9_g8_g9=d9,<_19_19_19_19=d1
vmul.i32 d1,d9,d1
# qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[1]g4_19g5_g6_19g7[0]g4_19g5_g6_19g7[2,3]
# asm 1: vrev64.i32 <g4_19g5_g6_19g7=reg128#13%bot,<g4_19g5_g6_19g7=reg128#13%bot
# asm 2: vrev64.i32 <g4_19g5_g6_19g7=d24,<g4_19g5_g6_19g7=d24
vrev64.i32 d24,d24
# qhasm: f8_2f9_f9_f6 = f8_2f9_f9_f6[0,1]f8_f9_g8_g9[1]f4_f5_f6_f7[2]
# asm 1: vext.32 <f8_2f9_f9_f6=reg128#15%top,<f8_f9_g8_g9=reg128#5%bot,<f4_f5_f6_f7=reg128#7%top,#1
# asm 2: vext.32 <f8_2f9_f9_f6=d29,<f8_f9_g8_g9=d8,<f4_f5_f6_f7=d13,#1
vext.32 d29,d8,d13,#1
# qhasm: g8_19g9_19g8_19g9 = g8_19g9_19g8_19g9[3]f8_f9_g8_g9[2]g8_19g9_19g8_19g9[2,3]
# asm 1: vext.32 <g8_19g9_19g8_19g9=reg128#1%bot,<g8_19g9_19g8_19g9=reg128#1%top,<f8_f9_g8_g9=reg128#5%top,#1
# asm 2: vext.32 <g8_19g9_19g8_19g9=d0,<g8_19g9_19g8_19g9=d1,<f8_f9_g8_g9=d9,#1
vext.32 d0,d1,d9,#1
# qhasm: g8_19g9_19g8_19g9 = g8_19g9_19g8_19g9[1]g8_19g9_19g8_19g9[0]g8_19g9_19g8_19g9[2,3]
# asm 1: vrev64.i32 <g8_19g9_19g8_19g9=reg128#1%bot,<g8_19g9_19g8_19g9=reg128#1%bot
# asm 2: vrev64.i32 <g8_19g9_19g8_19g9=d0,<g8_19g9_19g8_19g9=d0
vrev64.i32 d0,d0
# qhasm: new 19g8_g9_19g2_g3
# qhasm: 19g8_g9_19g2_g3 = f8_f9_g8_g9[3]g8_19g9_19g8_19g9[2]19g8_g9_19g2_g3[2,3]
# asm 1: vext.32 <19g8_g9_19g2_g3=reg128#2%bot,<f8_f9_g8_g9=reg128#5%top,<g8_19g9_19g8_19g9=reg128#1%top,#1
# asm 2: vext.32 <19g8_g9_19g2_g3=d2,<f8_f9_g8_g9=d9,<g8_19g9_19g8_19g9=d1,#1
vext.32 d2,d9,d1,#1
# qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[0,1]19g0_19g1_19g2_19g3[3]g0_g1_g2_g3[2]
# asm 1: vext.32 <g0_19g1_g2_19g3=reg128#12%top,<19g0_19g1_19g2_19g3=reg128#8%top,<g0_g1_g2_g3=reg128#3%top,#1
# asm 2: vext.32 <g0_19g1_g2_19g3=d23,<19g0_19g1_19g2_19g3=d15,<g0_g1_g2_g3=d5,#1
vext.32 d23,d15,d5,#1
# qhasm: h02[0,1] = f0_2f1_f2_2f3[0] signed* g0_g1_g2_g3[0];h02[2,3] = f0_2f1_f2_2f3[1] signed* g0_g1_g2_g3[1]
# asm 1: vmull.s32 >h02=reg128#5,<f0_2f1_f2_2f3=reg128#11%bot,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmull.s32 >h02=q4,<f0_2f1_f2_2f3=d20,<g0_g1_g2_g3=d4
vmull.s32 q4,d20,d4
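# Editor's note (hand-written, not qhasm output): each q accumulator
# carries two independent 64-bit sums (h0 and h2 in h02 above); one
# vmull.s32/vmlal.s32 multiplies two 32-bit limb pairs and accumulates
# both products, so the ten output limbs are produced as five
# interleaved pairs (h02, h31, h24, h53, ...).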
# qhasm: g0_19g1_g2_19g3 = g0_19g1_g2_19g3[0,1]g0_19g1_g2_19g3[3]g0_19g1_g2_19g3[2]
# asm 1: vrev64.i32 <g0_19g1_g2_19g3=reg128#12%top,<g0_19g1_g2_19g3=reg128#12%top
# asm 2: vrev64.i32 <g0_19g1_g2_19g3=d23,<g0_19g1_g2_19g3=d23
vrev64.i32 d23,d23
# qhasm: h02[0,1] += f0_2f1_f2_2f3[2] signed* g8_19g9_19g8_19g9[2];h02[2,3] += f0_2f1_f2_2f3[3] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h02=reg128#5,<f0_2f1_f2_2f3=reg128#11%top,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h02=q4,<f0_2f1_f2_2f3=d21,<g8_19g9_19g8_19g9=d1
vmlal.s32 q4,d21,d1
# qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[1]19g8_g9_19g2_g3[0]19g8_g9_19g2_g3[2,3]
# asm 1: vrev64.i32 <19g8_g9_19g2_g3=reg128#2%bot,<19g8_g9_19g2_g3=reg128#2%bot
# asm 2: vrev64.i32 <19g8_g9_19g2_g3=d2,<19g8_g9_19g2_g3=d2
vrev64.i32 d2,d2
# qhasm: h02[0,1] += f4_2f5_f6_2f7[0] signed* 19g4_19g5_19g6_19g7[2];h02[2,3] += f4_2f5_f6_2f7[1] signed* 19g4_19g5_19g6_19g7[3]
# asm 1: vmlal.s32 <h02=reg128#5,<f4_2f5_f6_2f7=reg128#14%bot,<19g4_19g5_19g6_19g7=reg128#10%top
# asm 2: vmlal.s32 <h02=q4,<f4_2f5_f6_2f7=d26,<19g4_19g5_19g6_19g7=d19
vmlal.s32 q4,d26,d19
# qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[0,1]g0_g1_g2_g3[3]19g0_19g1_19g2_19g3[2]
# asm 1: vext.32 <19g8_g9_19g2_g3=reg128#2%top,<g0_g1_g2_g3=reg128#3%top,<19g0_19g1_19g2_19g3=reg128#8%top,#1
# asm 2: vext.32 <19g8_g9_19g2_g3=d3,<g0_g1_g2_g3=d5,<19g0_19g1_19g2_19g3=d15,#1
vext.32 d3,d5,d15,#1
# qhasm: h02[0,1] += f4_2f5_f6_2f7[2] signed* 19g4_19g5_19g6_19g7[0];h02[2,3] += f4_2f5_f6_2f7[3] signed* 19g4_19g5_19g6_19g7[1]
# asm 1: vmlal.s32 <h02=reg128#5,<f4_2f5_f6_2f7=reg128#14%top,<19g4_19g5_19g6_19g7=reg128#10%bot
# asm 2: vmlal.s32 <h02=q4,<f4_2f5_f6_2f7=d27,<19g4_19g5_19g6_19g7=d18
vmlal.s32 q4,d27,d18
# qhasm: 19g8_g9_19g2_g3 = 19g8_g9_19g2_g3[0,1]19g8_g9_19g2_g3[3]19g8_g9_19g2_g3[2]
# asm 1: vrev64.i32 <19g8_g9_19g2_g3=reg128#2%top,<19g8_g9_19g2_g3=reg128#2%top
# asm 2: vrev64.i32 <19g8_g9_19g2_g3=d3,<19g8_g9_19g2_g3=d3
vrev64.i32 d3,d3
# qhasm: h02[0,1] += f8_2f9_f9_f6[0] signed* 19g0_19g1_19g2_19g3[2];h02[2,3] += f8_2f9_f9_f6[1] signed* 19g0_19g1_19g2_19g3[3]
# asm 1: vmlal.s32 <h02=reg128#5,<f8_2f9_f9_f6=reg128#15%bot,<19g0_19g1_19g2_19g3=reg128#8%top
# asm 2: vmlal.s32 <h02=q4,<f8_2f9_f9_f6=d28,<19g0_19g1_19g2_19g3=d15
vmlal.s32 q4,d28,d15
# qhasm: new f5_f2_f7_f4
# qhasm: f5_f2_f7_f4 = f4_f5_f6_f7[1]f0_f1_f2_f3[2]f5_f2_f7_f4[2,3]
# asm 1: vext.32 <f5_f2_f7_f4=reg128#8%bot,<f4_f5_f6_f7=reg128#7%bot,<f0_f1_f2_f3=reg128#6%top,#1
# asm 2: vext.32 <f5_f2_f7_f4=d14,<f4_f5_f6_f7=d12,<f0_f1_f2_f3=d11,#1
vext.32 d14,d12,d11,#1
# qhasm: h31[0,1] = f1_f8_f3_f0[0] signed* g0_19g1_g2_19g3[2];h31[2,3] = f1_f8_f3_f0[1] signed* g0_19g1_g2_19g3[3]
# asm 1: vmull.s32 >h31=reg128#6,<f1_f8_f3_f0=reg128#9%bot,<g0_19g1_g2_19g3=reg128#12%top
# asm 2: vmull.s32 >h31=q5,<f1_f8_f3_f0=d16,<g0_19g1_g2_19g3=d23
vmull.s32 q5,d16,d23
# qhasm: f5_f2_f7_f4 = f5_f2_f7_f4[0,1]f4_f5_f6_f7[3]f4_f5_f6_f7[0]
# asm 1: vext.32 <f5_f2_f7_f4=reg128#8%top,<f4_f5_f6_f7=reg128#7%top,<f4_f5_f6_f7=reg128#7%bot,#1
# asm 2: vext.32 <f5_f2_f7_f4=d15,<f4_f5_f6_f7=d13,<f4_f5_f6_f7=d12,#1
vext.32 d15,d13,d12,#1
# qhasm: h31[0,1] += f1_f8_f3_f0[2] signed* g0_g1_g2_g3[0];h31[2,3] += f1_f8_f3_f0[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h31=reg128#6,<f1_f8_f3_f0=reg128#9%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h31=q5,<f1_f8_f3_f0=d17,<g0_g1_g2_g3=d4
vmlal.s32 q5,d17,d4
# qhasm: mem64[playp] aligned= h02[0];playp+=8
# asm 1: vst1.8 <h02=reg128#5%bot,[<playp=int32#6,: 64]!
# asm 2: vst1.8 <h02=d8,[<playp=r5,: 64]!
vst1.8 d8,[r5,: 64]!
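# Editor's note (hand-written, not qhasm output): completed accumulator
# halves are spilled to the playground2 stack area as soon as they are
# final; sixteen q registers are not enough to hold all ten 64-bit limb
# sums together with the shuffled f and g inputs.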
# qhasm: h31[0,1] += f5_f2_f7_f4[0] signed* g8_19g9_19g8_19g9[2];h31[2,3] += f5_f2_f7_f4[1] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h31=reg128#6,<f5_f2_f7_f4=reg128#8%bot,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h31=q5,<f5_f2_f7_f4=d14,<g8_19g9_19g8_19g9=d1
vmlal.s32 q5,d14,d1
# qhasm: new h24
# qhasm: h24 = h02[2,3]h24[2,3]
# asm 1: vext.32 <h24=reg128#7%bot,<h02=reg128#5%top,<h02=reg128#5%bot,#0
# asm 2: vext.32 <h24=d12,<h02=d9,<h02=d8,#0
vext.32 d12,d9,d8,#0
# qhasm: h31[0,1] += f5_f2_f7_f4[2] signed* 19g4_19g5_19g6_19g7[2];h31[2,3] += f5_f2_f7_f4[3] signed* 19g4_19g5_19g6_19g7[3]
# asm 1: vmlal.s32 <h31=reg128#6,<f5_f2_f7_f4=reg128#8%top,<19g4_19g5_19g6_19g7=reg128#10%top
# asm 2: vmlal.s32 <h31=q5,<f5_f2_f7_f4=d15,<19g4_19g5_19g6_19g7=d19
vmlal.s32 q5,d15,d19
# qhasm: h24 = h24[0],0
# asm 1: vmov.i64 <h24=reg128#7%top,#0
# asm 2: vmov.i64 <h24=d13,#0
vmov.i64 d13,#0
# qhasm: h31[0,1] += f8_2f9_f9_f6[2] signed* 19g4_19g5_19g6_19g7[0];h31[2,3] += f8_2f9_f9_f6[3] signed* 19g4_19g5_19g6_19g7[1]
# asm 1: vmlal.s32 <h31=reg128#6,<f8_2f9_f9_f6=reg128#15%top,<19g4_19g5_19g6_19g7=reg128#10%bot
# asm 2: vmlal.s32 <h31=q5,<f8_2f9_f9_f6=d29,<19g4_19g5_19g6_19g7=d18
vmlal.s32 q5,d29,d18
# qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[0,1]19g4_19g5_19g6_19g7[3]g4_g5_g6_g7[2]
# asm 1: vext.32 <g4_19g5_g6_19g7=reg128#13%top,<19g4_19g5_19g6_19g7=reg128#10%top,<g4_g5_g6_g7=reg128#4%top,#1
# asm 2: vext.32 <g4_19g5_g6_19g7=d25,<19g4_19g5_19g6_19g7=d19,<g4_g5_g6_g7=d7,#1
vext.32 d25,d19,d7,#1
# qhasm: h24[0,1] += f0_2f1_f2_2f3[0] signed* g0_g1_g2_g3[2];h24[2,3] += f0_2f1_f2_2f3[1] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h24=reg128#7,<f0_2f1_f2_2f3=reg128#11%bot,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h24=q6,<f0_2f1_f2_2f3=d20,<g0_g1_g2_g3=d5
vmlal.s32 q6,d20,d5
# qhasm: g4_19g5_g6_19g7 = g4_19g5_g6_19g7[0,1]g4_19g5_g6_19g7[3]g4_19g5_g6_19g7[2]
# asm 1: vrev64.i32 <g4_19g5_g6_19g7=reg128#13%top,<g4_19g5_g6_19g7=reg128#13%top
# asm 2: vrev64.i32 <g4_19g5_g6_19g7=d25,<g4_19g5_g6_19g7=d25
vrev64.i32 d25,d25
# qhasm: h24[0,1] += f0_2f1_f2_2f3[2] signed* g0_g1_g2_g3[0];h24[2,3] += f0_2f1_f2_2f3[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h24=reg128#7,<f0_2f1_f2_2f3=reg128#11%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h24=q6,<f0_2f1_f2_2f3=d21,<g0_g1_g2_g3=d4
vmlal.s32 q6,d21,d4
# qhasm: mem64[playp] aligned= h31[1];playp+=8
# asm 1: vst1.8 <h31=reg128#6%top,[<playp=int32#6,: 64]!
# asm 2: vst1.8 <h31=d11,[<playp=r5,: 64]!
vst1.8 d11,[r5,: 64]!
# qhasm: h24[0,1] += f4_2f5_f6_2f7[0] signed* g8_19g9_19g8_19g9[2];h24[2,3] += f4_2f5_f6_2f7[1] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h24=reg128#7,<f4_2f5_f6_2f7=reg128#14%bot,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h24=q6,<f4_2f5_f6_2f7=d26,<g8_19g9_19g8_19g9=d1
vmlal.s32 q6,d26,d1
# qhasm: new h53
# qhasm: h53 = h53[0,1]h31[0,1]
# asm 1: vext.32 <h53=reg128#5%top,<h31=reg128#6%bot,<h31=reg128#6%bot,#0
# asm 2: vext.32 <h53=d9,<h31=d10,<h31=d10,#0
vext.32 d9,d10,d10,#0
# qhasm: h24[0,1] += f4_2f5_f6_2f7[2] signed* 19g4_19g5_19g6_19g7[2];h24[2,3] += f4_2f5_f6_2f7[3] signed* 19g4_19g5_19g6_19g7[3]
# asm 1: vmlal.s32 <h24=reg128#7,<f4_2f5_f6_2f7=reg128#14%top,<19g4_19g5_19g6_19g7=reg128#10%top
# asm 2: vmlal.s32 <h24=q6,<f4_2f5_f6_2f7=d27,<19g4_19g5_19g6_19g7=d19
vmlal.s32 q6,d27,d19
# qhasm: h53 = 0,h53[1]
# asm 1: vmov.i64 <h53=reg128#5%bot,#0
# asm 2: vmov.i64 <h53=d8,#0
vmov.i64 d8,#0
# qhasm: h24[0,1] += f8_2f9_f9_f6[0] signed* 19g4_19g5_19g6_19g7[0];h24[2,3] += f8_2f9_f9_f6[1] signed* 19g4_19g5_19g6_19g7[1]
# asm 1: vmlal.s32 <h24=reg128#7,<f8_2f9_f9_f6=reg128#15%bot,<19g4_19g5_19g6_19g7=reg128#10%bot
# asm 2: vmlal.s32 <h24=q6,<f8_2f9_f9_f6=d28,<19g4_19g5_19g6_19g7=d18
vmlal.s32 q6,d28,d18
# qhasm: h53[0,1] += f1_f8_f3_f0[0] signed* g4_19g5_g6_19g7[0];h53[2,3] += f1_f8_f3_f0[1] signed* g4_19g5_g6_19g7[1]
# asm 1: vmlal.s32 <h53=reg128#5,<f1_f8_f3_f0=reg128#9%bot,<g4_19g5_g6_19g7=reg128#13%bot
# asm 2: vmlal.s32 <h53=q4,<f1_f8_f3_f0=d16,<g4_19g5_g6_19g7=d24
vmlal.s32 q4,d16,d24
# qhasm: h53[0,1] += f1_f8_f3_f0[2] signed* g0_g1_g2_g3[2];h53[2,3] += f1_f8_f3_f0[3] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h53=reg128#5,<f1_f8_f3_f0=reg128#9%top,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h53=q4,<f1_f8_f3_f0=d17,<g0_g1_g2_g3=d5
vmlal.s32 q4,d17,d5
# qhasm: h53[0,1] += f5_f2_f7_f4[0] signed* g0_g1_g2_g3[0];h53[2,3] += f5_f2_f7_f4[1] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h53=reg128#5,<f5_f2_f7_f4=reg128#8%bot,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h53=q4,<f5_f2_f7_f4=d14,<g0_g1_g2_g3=d4
vmlal.s32 q4,d14,d4
# qhasm: mem64[playp] aligned= h24[0];playp+=8
# asm 1: vst1.8 <h24=reg128#7%bot,[<playp=int32#6,: 64]!
# asm 2: vst1.8 <h24=d12,[<playp=r5,: 64]!
vst1.8 d12,[r5,: 64]!
# qhasm: h53[0,1] += f5_f2_f7_f4[2] signed* g8_19g9_19g8_19g9[2];h53[2,3] += f5_f2_f7_f4[3] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h53=reg128#5,<f5_f2_f7_f4=reg128#8%top,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h53=q4,<f5_f2_f7_f4=d15,<g8_19g9_19g8_19g9=d1
vmlal.s32 q4,d15,d1
# qhasm: new h46
# qhasm: h46 = h24[2,3]h46[2,3]
# asm 1: vext.32 <h46=reg128#6%bot,<h24=reg128#7%top,<h24=reg128#7%bot,#0
# asm 2: vext.32 <h46=d10,<h24=d13,<h24=d12,#0
vext.32 d10,d13,d12,#0
# qhasm: h53[0,1] += f8_2f9_f9_f6[2] signed* 19g4_19g5_19g6_19g7[2];h53[2,3] += f8_2f9_f9_f6[3] signed* 19g4_19g5_19g6_19g7[3]
# asm 1: vmlal.s32 <h53=reg128#5,<f8_2f9_f9_f6=reg128#15%top,<19g4_19g5_19g6_19g7=reg128#10%top
# asm 2: vmlal.s32 <h53=q4,<f8_2f9_f9_f6=d29,<19g4_19g5_19g6_19g7=d19
vmlal.s32 q4,d29,d19
# qhasm: h46 = h46[0],0
# asm 1: vmov.i64 <h46=reg128#6%top,#0
# asm 2: vmov.i64 <h46=d11,#0
vmov.i64 d11,#0
# qhasm: h46[0,1] += f0_2f1_f2_2f3[0] signed* g4_g5_g6_g7[0];h46[2,3] += f0_2f1_f2_2f3[1] signed* g4_g5_g6_g7[1]
# asm 1: vmlal.s32 <h46=reg128#6,<f0_2f1_f2_2f3=reg128#11%bot,<g4_g5_g6_g7=reg128#4%bot
# asm 2: vmlal.s32 <h46=q5,<f0_2f1_f2_2f3=d20,<g4_g5_g6_g7=d6
vmlal.s32 q5,d20,d6
# qhasm: h46[0,1] += f0_2f1_f2_2f3[2] signed* g0_g1_g2_g3[2];h46[2,3] += f0_2f1_f2_2f3[3] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h46=reg128#6,<f0_2f1_f2_2f3=reg128#11%top,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h46=q5,<f0_2f1_f2_2f3=d21,<g0_g1_g2_g3=d5
vmlal.s32 q5,d21,d5
# qhasm: h46[0,1] += f4_2f5_f6_2f7[0] signed* g0_g1_g2_g3[0];h46[2,3] += f4_2f5_f6_2f7[1] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h46=reg128#6,<f4_2f5_f6_2f7=reg128#14%bot,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h46=q5,<f4_2f5_f6_2f7=d26,<g0_g1_g2_g3=d4
vmlal.s32 q5,d26,d4
# qhasm: new h75
# qhasm: h75 = h75[0,1]h53[0,1]
# asm 1: vext.32 <h75=reg128#7%top,<h53=reg128#5%bot,<h53=reg128#5%bot,#0
# asm 2: vext.32 <h75=d13,<h53=d8,<h53=d8,#0
vext.32 d13,d8,d8,#0
# qhasm: h46[0,1] += f4_2f5_f6_2f7[2] signed* g8_19g9_19g8_19g9[2];h46[2,3] += f4_2f5_f6_2f7[3] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h46=reg128#6,<f4_2f5_f6_2f7=reg128#14%top,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h46=q5,<f4_2f5_f6_2f7=d27,<g8_19g9_19g8_19g9=d1
vmlal.s32 q5,d27,d1
# qhasm: h75 = 0,h75[1]
# asm 1: vmov.i64 <h75=reg128#7%bot,#0
# asm 2: vmov.i64 <h75=d12,#0
vmov.i64 d12,#0
# qhasm: h46[0,1] += f8_2f9_f9_f6[0] signed* 19g4_19g5_19g6_19g7[2];h46[2,3] += f8_2f9_f9_f6[1] signed* 19g4_19g5_19g6_19g7[3]
# asm 1: vmlal.s32 <h46=reg128#6,<f8_2f9_f9_f6=reg128#15%bot,<19g4_19g5_19g6_19g7=reg128#10%top
# asm 2: vmlal.s32 <h46=q5,<f8_2f9_f9_f6=d28,<19g4_19g5_19g6_19g7=d19
vmlal.s32 q5,d28,d19
# qhasm: mem64[playp] aligned= h53[1];playp+=8
# asm 1: vst1.8 <h53=reg128#5%top,[<playp=int32#6,: 64]!
# asm 2: vst1.8 <h53=d9,[<playp=r5,: 64]!
vst1.8 d9,[r5,: 64]!
# qhasm: h75[0,1] += f1_f8_f3_f0[0] signed* g4_19g5_g6_19g7[2];h75[2,3] += f1_f8_f3_f0[1] signed* g4_19g5_g6_19g7[3]
# asm 1: vmlal.s32 <h75=reg128#7,<f1_f8_f3_f0=reg128#9%bot,<g4_19g5_g6_19g7=reg128#13%top
# asm 2: vmlal.s32 <h75=q6,<f1_f8_f3_f0=d16,<g4_19g5_g6_19g7=d25
vmlal.s32 q6,d16,d25
# qhasm: h75[0,1] += f1_f8_f3_f0[2] signed* g4_g5_g6_g7[0];h75[2,3] += f1_f8_f3_f0[3] signed* g4_g5_g6_g7[1]
# asm 1: vmlal.s32 <h75=reg128#7,<f1_f8_f3_f0=reg128#9%top,<g4_g5_g6_g7=reg128#4%bot
# asm 2: vmlal.s32 <h75=q6,<f1_f8_f3_f0=d17,<g4_g5_g6_g7=d6
vmlal.s32 q6,d17,d6
# qhasm: mem64[playp] aligned= h46[0]
# asm 1: vst1.8 <h46=reg128#6%bot,[<playp=int32#6,: 64]
# asm 2: vst1.8 <h46=d10,[<playp=r5,: 64]
vst1.8 d10,[r5,: 64]
# qhasm: h75[0,1] += f5_f2_f7_f4[0] signed* g0_g1_g2_g3[2];h75[2,3] += f5_f2_f7_f4[1] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h75=reg128#7,<f5_f2_f7_f4=reg128#8%bot,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h75=q6,<f5_f2_f7_f4=d14,<g0_g1_g2_g3=d5
vmlal.s32 q6,d14,d5
# qhasm: new h68
# qhasm: h68 = h46[2,3]h68[2,3]
# asm 1: vext.32 <h68=reg128#5%bot,<h46=reg128#6%top,<h46=reg128#6%bot,#0
# asm 2: vext.32 <h68=d8,<h46=d11,<h46=d10,#0
vext.32 d8,d11,d10,#0
# qhasm: h75[0,1] += f5_f2_f7_f4[2] signed* g0_g1_g2_g3[0];h75[2,3] += f5_f2_f7_f4[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h75=reg128#7,<f5_f2_f7_f4=reg128#8%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h75=q6,<f5_f2_f7_f4=d15,<g0_g1_g2_g3=d4
vmlal.s32 q6,d15,d4
# qhasm: h68 = h68[0],0
# asm 1: vmov.i64 <h68=reg128#5%top,#0
# asm 2: vmov.i64 <h68=d9,#0
vmov.i64 d9,#0
# qhasm: h75[0,1] += f8_2f9_f9_f6[2] signed* g8_19g9_19g8_19g9[2];h75[2,3] += f8_2f9_f9_f6[3] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h75=reg128#7,<f8_2f9_f9_f6=reg128#15%top,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h75=q6,<f8_2f9_f9_f6=d29,<g8_19g9_19g8_19g9=d1
vmlal.s32 q6,d29,d1
# qhasm: h68[0,1] += f0_2f1_f2_2f3[0] signed* g4_g5_g6_g7[2];h68[2,3] += f0_2f1_f2_2f3[1] signed* g4_g5_g6_g7[3]
# asm 1: vmlal.s32 <h68=reg128#5,<f0_2f1_f2_2f3=reg128#11%bot,<g4_g5_g6_g7=reg128#4%top
# asm 2: vmlal.s32 <h68=q4,<f0_2f1_f2_2f3=d20,<g4_g5_g6_g7=d7
vmlal.s32 q4,d20,d7
# qhasm: h68[0,1] += f0_2f1_f2_2f3[2] signed* g4_g5_g6_g7[0];h68[2,3] += f0_2f1_f2_2f3[3] signed* g4_g5_g6_g7[1]
# asm 1: vmlal.s32 <h68=reg128#5,<f0_2f1_f2_2f3=reg128#11%top,<g4_g5_g6_g7=reg128#4%bot
# asm 2: vmlal.s32 <h68=q4,<f0_2f1_f2_2f3=d21,<g4_g5_g6_g7=d6
vmlal.s32 q4,d21,d6
# qhasm: h68[0,1] += f4_2f5_f6_2f7[0] signed* g0_g1_g2_g3[2];h68[2,3] += f4_2f5_f6_2f7[1] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h68=reg128#5,<f4_2f5_f6_2f7=reg128#14%bot,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h68=q4,<f4_2f5_f6_2f7=d26,<g0_g1_g2_g3=d5
vmlal.s32 q4,d26,d5
# qhasm: new h97
# qhasm: h97 = h97[0,1]h75[0,1]
# asm 1: vext.32 <h97=reg128#6%top,<h75=reg128#7%bot,<h75=reg128#7%bot,#0
# asm 2: vext.32 <h97=d11,<h75=d12,<h75=d12,#0
vext.32 d11,d12,d12,#0
# qhasm: h68[0,1] += f4_2f5_f6_2f7[2] signed* g0_g1_g2_g3[0];h68[2,3] += f4_2f5_f6_2f7[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h68=reg128#5,<f4_2f5_f6_2f7=reg128#14%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h68=q4,<f4_2f5_f6_2f7=d27,<g0_g1_g2_g3=d4
vmlal.s32 q4,d27,d4
# qhasm: h97 = 0,h97[1]
# asm 1: vmov.i64 <h97=reg128#6%bot,#0
# asm 2: vmov.i64 <h97=d10,#0
vmov.i64 d10,#0
# qhasm: h68[0,1] += f8_2f9_f9_f6[0] signed* g8_19g9_19g8_19g9[2];h68[2,3] += f8_2f9_f9_f6[1] signed* g8_19g9_19g8_19g9[3]
# asm 1: vmlal.s32 <h68=reg128#5,<f8_2f9_f9_f6=reg128#15%bot,<g8_19g9_19g8_19g9=reg128#1%top
# asm 2: vmlal.s32 <h68=q4,<f8_2f9_f9_f6=d28,<g8_19g9_19g8_19g9=d1
vmlal.s32 q4,d28,d1
# qhasm: h97[0,1] += f1_f8_f3_f0[0] signed* g8_19g9_19g8_19g9[0];h97[2,3] += f1_f8_f3_f0[1] signed* g8_19g9_19g8_19g9[1]
# asm 1: vmlal.s32 <h97=reg128#6,<f1_f8_f3_f0=reg128#9%bot,<g8_19g9_19g8_19g9=reg128#1%bot
# asm 2: vmlal.s32 <h97=q5,<f1_f8_f3_f0=d16,<g8_19g9_19g8_19g9=d0
vmlal.s32 q5,d16,d0
# qhasm: playp -= 32
# asm 1: sub >playp=int32#3,<playp=int32#6,#32
# asm 2: sub >playp=r2,<playp=r5,#32
sub r2,r5,#32
# qhasm: h97[0,1] += f1_f8_f3_f0[2] signed* g4_g5_g6_g7[2];h97[2,3] += f1_f8_f3_f0[3] signed* g4_g5_g6_g7[3]
# asm 1: vmlal.s32 <h97=reg128#6,<f1_f8_f3_f0=reg128#9%top,<g4_g5_g6_g7=reg128#4%top
# asm 2: vmlal.s32 <h97=q5,<f1_f8_f3_f0=d17,<g4_g5_g6_g7=d7
vmlal.s32 q5,d17,d7
# qhasm: h97[0,1] += f5_f2_f7_f4[0] signed* g4_g5_g6_g7[0];h97[2,3] += f5_f2_f7_f4[1] signed* g4_g5_g6_g7[1]
# asm 1: vmlal.s32 <h97=reg128#6,<f5_f2_f7_f4=reg128#8%bot,<g4_g5_g6_g7=reg128#4%bot
# asm 2: vmlal.s32 <h97=q5,<f5_f2_f7_f4=d14,<g4_g5_g6_g7=d6
vmlal.s32 q5,d14,d6
# qhasm: new h80
# qhasm: h80 = h68[2,3]h80[2,3]
# asm 1: vext.32 <h80=reg128#16%bot,<h68=reg128#5%top,<h68=reg128#5%bot,#0
# asm 2: vext.32 <h80=d30,<h68=d9,<h68=d8,#0
vext.32 d30,d9,d8,#0
# qhasm: h97[0,1] += f5_f2_f7_f4[2] signed* g0_g1_g2_g3[2];h97[2,3] += f5_f2_f7_f4[3] signed* g0_g1_g2_g3[3]
# asm 1: vmlal.s32 <h97=reg128#6,<f5_f2_f7_f4=reg128#8%top,<g0_g1_g2_g3=reg128#3%top
# asm 2: vmlal.s32 <h97=q5,<f5_f2_f7_f4=d15,<g0_g1_g2_g3=d5
vmlal.s32 q5,d15,d5
# qhasm: h80 aligned= h80[0]mem64[playp];playp+=8
# asm 1: vld1.8 {<h80=reg128#16%top},[<playp=int32#3,: 64]!
# asm 2: vld1.8 {<h80=d31},[<playp=r2,: 64]!
vld1.8 {d31},[r2,: 64]!
# qhasm: h97[0,1] += f8_2f9_f9_f6[2] signed* g0_g1_g2_g3[0];h97[2,3] += f8_2f9_f9_f6[3] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h97=reg128#6,<f8_2f9_f9_f6=reg128#15%top,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h97=q5,<f8_2f9_f9_f6=d29,<g0_g1_g2_g3=d4
vmlal.s32 q5,d29,d4
# qhasm: h80[0,1] += f0_2f1_f2_2f3[0] signed* g8_19g9_19g8_19g9[0];h80[2,3] += f0_2f1_f2_2f3[1] signed* g8_19g9_19g8_19g9[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f0_2f1_f2_2f3=reg128#11%bot,<g8_19g9_19g8_19g9=reg128#1%bot
# asm 2: vmlal.s32 <h80=q15,<f0_2f1_f2_2f3=d20,<g8_19g9_19g8_19g9=d0
vmlal.s32 q15,d20,d0
# qhasm: new 19g4_g5_19g6_g7
# qhasm: 19g4_g5_19g6_g7 = g4_g5_g6_g7[1]19g4_19g5_19g6_19g7[0]19g4_g5_19g6_g7[2,3]
# asm 1: vext.32 <19g4_g5_19g6_g7=reg128#1%bot,<g4_g5_g6_g7=reg128#4%bot,<19g4_19g5_19g6_19g7=reg128#10%bot,#1
# asm 2: vext.32 <19g4_g5_19g6_g7=d0,<g4_g5_g6_g7=d6,<19g4_19g5_19g6_19g7=d18,#1
vext.32 d0,d6,d18,#1
# qhasm: h80[0,1] += f0_2f1_f2_2f3[2] signed* g4_19g5_g6_19g7[2];h80[2,3] += f0_2f1_f2_2f3[3] signed* g4_19g5_g6_19g7[3]
# asm 1: vmlal.s32 <h80=reg128#16,<f0_2f1_f2_2f3=reg128#11%top,<g4_19g5_g6_19g7=reg128#13%top
# asm 2: vmlal.s32 <h80=q15,<f0_2f1_f2_2f3=d21,<g4_19g5_g6_19g7=d25
vmlal.s32 q15,d21,d25
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[1]19g4_g5_19g6_g7[0]19g4_g5_19g6_g7[2,3]
# asm 1: vrev64.i32 <19g4_g5_19g6_g7=reg128#1%bot,<19g4_g5_19g6_g7=reg128#1%bot
# asm 2: vrev64.i32 <19g4_g5_19g6_g7=d0,<19g4_g5_19g6_g7=d0
vrev64.i32 d0,d0
# qhasm: h80[0,1] += f4_2f5_f6_2f7[0] signed* g4_19g5_g6_19g7[0];h80[2,3] += f4_2f5_f6_2f7[1] signed* g4_19g5_g6_19g7[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f4_2f5_f6_2f7=reg128#14%bot,<g4_19g5_g6_19g7=reg128#13%bot
# asm 2: vmlal.s32 <h80=q15,<f4_2f5_f6_2f7=d26,<g4_19g5_g6_19g7=d24
vmlal.s32 q15,d26,d24
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[0,1]g4_g5_g6_g7[3]19g4_19g5_19g6_19g7[2]
# asm 1: vext.32 <19g4_g5_19g6_g7=reg128#1%top,<g4_g5_g6_g7=reg128#4%top,<19g4_19g5_19g6_19g7=reg128#10%top,#1
# asm 2: vext.32 <19g4_g5_19g6_g7=d1,<g4_g5_g6_g7=d7,<19g4_19g5_19g6_19g7=d19,#1
vext.32 d1,d7,d19,#1
# qhasm: new h19
# qhasm: h19 = h19[0,1]h97[0,1]
# asm 1: vext.32 <h19=reg128#4%top,<h97=reg128#6%bot,<h97=reg128#6%bot,#0
# asm 2: vext.32 <h19=d7,<h97=d10,<h97=d10,#0
vext.32 d7,d10,d10,#0
# qhasm: h80[0,1] += f4_2f5_f6_2f7[2] signed* g0_19g1_g2_19g3[2];h80[2,3] += f4_2f5_f6_2f7[3] signed* g0_19g1_g2_19g3[3]
# asm 1: vmlal.s32 <h80=reg128#16,<f4_2f5_f6_2f7=reg128#14%top,<g0_19g1_g2_19g3=reg128#12%top
# asm 2: vmlal.s32 <h80=q15,<f4_2f5_f6_2f7=d27,<g0_19g1_g2_19g3=d23
vmlal.s32 q15,d27,d23
# qhasm: 19g4_g5_19g6_g7 = 19g4_g5_19g6_g7[0,1]19g4_g5_19g6_g7[3]19g4_g5_19g6_g7[2]
# asm 1: vrev64.i32 <19g4_g5_19g6_g7=reg128#1%top,<19g4_g5_19g6_g7=reg128#1%top
# asm 2: vrev64.i32 <19g4_g5_19g6_g7=d1,<19g4_g5_19g6_g7=d1
vrev64.i32 d1,d1
# qhasm: h19 aligned= mem64[playp]h19[1]
# asm 1: vld1.8 {<h19=reg128#4%bot},[<playp=int32#3,: 64]
# asm 2: vld1.8 {<h19=d6},[<playp=r2,: 64]
vld1.8 {d6},[r2,: 64]
# qhasm: h80[0,1] += f8_2f9_f9_f6[0] signed* g0_19g1_g2_19g3[0];h80[2,3] += f8_2f9_f9_f6[1] signed* g0_19g1_g2_19g3[1]
# asm 1: vmlal.s32 <h80=reg128#16,<f8_2f9_f9_f6=reg128#15%bot,<g0_19g1_g2_19g3=reg128#12%bot
# asm 2: vmlal.s32 <h80=q15,<f8_2f9_f9_f6=d28,<g0_19g1_g2_19g3=d22
vmlal.s32 q15,d28,d22
# qhasm: h19[0,1] += f1_f8_f3_f0[0] signed* g0_g1_g2_g3[0];h19[2,3] += f1_f8_f3_f0[1] signed* g0_g1_g2_g3[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f1_f8_f3_f0=reg128#9%bot,<g0_g1_g2_g3=reg128#3%bot
# asm 2: vmlal.s32 <h19=q3,<f1_f8_f3_f0=d16,<g0_g1_g2_g3=d4
vmlal.s32 q3,d16,d4
# qhasm: playp+=24
# asm 1: add >playp=int32#3,<playp=int32#3,#24
# asm 2: add >playp=r2,<playp=r2,#24
add r2,r2,#24
# qhasm: h19[0,1] += f1_f8_f3_f0[2] signed* 19g8_g9_19g2_g3[0];h19[2,3] += f1_f8_f3_f0[3] signed* 19g8_g9_19g2_g3[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f1_f8_f3_f0=reg128#9%top,<19g8_g9_19g2_g3=reg128#2%bot
# asm 2: vmlal.s32 <h19=q3,<f1_f8_f3_f0=d17,<19g8_g9_19g2_g3=d2
vmlal.s32 q3,d17,d2
# qhasm: new h04
# qhasm: h04 = h80[2,3]h04[2,3]
# asm 1: vext.32 <h04=reg128#3%bot,<h80=reg128#16%top,<h80=reg128#16%bot,#0
# asm 2: vext.32 <h04=d4,<h80=d31,<h80=d30,#0
vext.32 d4,d31,d30,#0
# qhasm: new h37
# qhasm: h37 = h37[0]h97[1]
# asm 1: vmov <h37=reg128#9%top,<h97=reg128#6%top
# asm 2: vmov <h37=d17,<h97=d11
vmov d17,d11
# qhasm: h19[0,1] += f5_f2_f7_f4[0] signed* 19g4_g5_19g6_g7[2];h19[2,3] += f5_f2_f7_f4[1] signed* 19g4_g5_19g6_g7[3]
# asm 1: vmlal.s32 <h19=reg128#4,<f5_f2_f7_f4=reg128#8%bot,<19g4_g5_19g6_g7=reg128#1%top
# asm 2: vmlal.s32 <h19=q3,<f5_f2_f7_f4=d14,<19g4_g5_19g6_g7=d1
vmlal.s32 q3,d14,d1
# qhasm: new h15
# qhasm: h15 = h15[0,1]h75[2,3]
# asm 1: vext.32 <h15=reg128#6%top,<h75=reg128#7%top,<h75=reg128#7%top,#0
# asm 2: vext.32 <h15=d11,<h75=d13,<h75=d13,#0
vext.32 d11,d13,d13,#0
# qhasm: new h48
# qhasm: h48 = h48[0,1]h80[0,1]
# asm 1: vext.32 <h48=reg128#7%top,<h80=reg128#16%bot,<h80=reg128#16%bot,#0
# asm 2: vext.32 <h48=d13,<h80=d30,<h80=d30,#0
vext.32 d13,d30,d30,#0
# qhasm: h19[0,1] += f5_f2_f7_f4[2] signed* 19g4_g5_19g6_g7[0];h19[2,3] += f5_f2_f7_f4[3] signed* 19g4_g5_19g6_g7[1]
# asm 1: vmlal.s32 <h19=reg128#4,<f5_f2_f7_f4=reg128#8%top,<19g4_g5_19g6_g7=reg128#1%bot
# asm 2: vmlal.s32 <h19=q3,<f5_f2_f7_f4=d15,<19g4_g5_19g6_g7=d0
vmlal.s32 q3,d15,d0
# qhasm: new h26
# qhasm: h26 = h26[0,1]h68[0,1]
# asm 1: vext.32 <h26=reg128#1%top,<h68=reg128#5%bot,<h68=reg128#5%bot,#0
# asm 2: vext.32 <h26=d1,<h68=d8,<h68=d8,#0
vext.32 d1,d8,d8,#0
# qhasm: h19[0,1] += f8_2f9_f9_f6[2] signed* 19g8_g9_19g2_g3[2];h19[2,3] += f8_2f9_f9_f6[3] signed* 19g8_g9_19g2_g3[3]
# asm 1: vmlal.s32 <h19=reg128#4,<f8_2f9_f9_f6=reg128#15%top,<19g8_g9_19g2_g3=reg128#2%top
# asm 2: vmlal.s32 <h19=q3,<f8_2f9_f9_f6=d29,<19g8_g9_19g2_g3=d3
vmlal.s32 q3,d29,d3
# qhasm: h04 aligned= h04[0]mem64[playp]
# asm 1: vld1.8 {<h04=reg128#3%top},[<playp=int32#3,: 64]
# asm 2: vld1.8 {<h04=d5},[<playp=r2,: 64]
vld1.8 {d5},[r2,: 64]
# qhasm: playp -= 16
# asm 1: sub >playp=int32#3,<playp=int32#3,#16
# asm 2: sub >playp=r2,<playp=r2,#16
sub r2,r2,#16
# qhasm: h15 = h19[0,1]h15[2,3]
# asm 1: vext.32 <h15=reg128#6%bot,<h19=reg128#4%bot,<h19=reg128#4%bot,#0
# asm 2: vext.32 <h15=d10,<h19=d6,<h19=d6,#0
vext.32 d10,d6,d6,#0
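# Editor's note (hand-written, not qhasm output): the carry chain below
# is the same as the one following the squaring loop above: alternating
# 26-/25-bit rounded carries h0 -> h1 -> ... -> h9, with the final
# carry folded back into h0 as 19*c.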
  7961. # qhasm: 4x mask26 = 0xffffffff
  7962. # asm 1: vmov.i32 >mask26=reg128#2,#0xffffffff
  7963. # asm 2: vmov.i32 >mask26=q1,#0xffffffff
  7964. vmov.i32 q1,#0xffffffff
  7965. # qhasm: 2x mask25 = mask26 << 25
  7966. # asm 1: vshl.i64 >mask25=reg128#5,<mask26=reg128#2,#25
  7967. # asm 2: vshl.i64 >mask25=q4,<mask26=q1,#25
  7968. vshl.i64 q4,q1,#25
  7969. # qhasm: ptr = &_0x2000000_stack
  7970. # asm 1: lea >ptr=int32#6,<_0x2000000_stack=stack128#1
  7971. # asm 2: lea >ptr=r5,<_0x2000000_stack=[sp,#512]
  7972. add r5,sp,#512
  7973. # qhasm: _0x2000000 aligned= mem128[ptr]
  7974. # asm 1: vld1.8 {>_0x2000000=reg128#8%bot->_0x2000000=reg128#8%top},[<ptr=int32#6,: 128]
  7975. # asm 2: vld1.8 {>_0x2000000=d14->_0x2000000=d15},[<ptr=r5,: 128]
  7976. vld1.8 {d14-d15},[r5,: 128]
  7977. # qhasm: 2x t0 = h04 + _0x2000000
  7978. # asm 1: vadd.i64 >t0=reg128#10,<h04=reg128#3,<_0x2000000=reg128#8
  7979. # asm 2: vadd.i64 >t0=q9,<h04=q2,<_0x2000000=q7
  7980. vadd.i64 q9,q2,q7
  7981. # qhasm: 2x mask26 <<= 26
  7982. # asm 1: vshl.i64 >mask26=reg128#2,<mask26=reg128#2,#26
  7983. # asm 2: vshl.i64 >mask26=q1,<mask26=q1,#26
  7984. vshl.i64 q1,q1,#26
  7985. # qhasm: 2x c = t0 signed>> 26
  7986. # asm 1: vshr.s64 >c=reg128#11,<t0=reg128#10,#26
  7987. # asm 2: vshr.s64 >c=q10,<t0=q9,#26
  7988. vshr.s64 q10,q9,#26
  7989. # qhasm: h26 aligned= mem64[playp]h26[1];playp += 8
  7990. # asm 1: vld1.8 {<h26=reg128#1%bot},[<playp=int32#3,: 64]!
  7991. # asm 2: vld1.8 {<h26=d0},[<playp=r2,: 64]!
  7992. vld1.8 {d0},[r2,: 64]!
  7993. # qhasm: 2x h15 += c
  7994. # asm 1: vadd.i64 >h15=reg128#6,<h15=reg128#6,<c=reg128#11
  7995. # asm 2: vadd.i64 >h15=q5,<h15=q5,<c=q10
  7996. vadd.i64 q5,q5,q10
  7997. # qhasm: t0 &= mask26
  7998. # asm 1: vand >t0=reg128#10,<t0=reg128#10,<mask26=reg128#2
  7999. # asm 2: vand >t0=q9,<t0=q9,<mask26=q1
  8000. vand q9,q9,q1
  8001. # qhasm: h37 aligned= mem64[playp]h37[1];playp += 8
  8002. # asm 1: vld1.8 {<h37=reg128#9%bot},[<playp=int32#3,: 64]!
  8003. # asm 2: vld1.8 {<h37=d16},[<playp=r2,: 64]!
  8004. vld1.8 {d16},[r2,: 64]!
  8005. # qhasm: ptr = &_0x1000000_stack
  8006. # asm 1: lea >ptr=int32#3,<_0x1000000_stack=stack128#2
  8007. # asm 2: lea >ptr=r2,<_0x1000000_stack=[sp,#528]
  8008. add r2,sp,#528
  8009. # qhasm: _0x1000000 aligned= mem128[ptr]
  8010. # asm 1: vld1.8 {>_0x1000000=reg128#11%bot->_0x1000000=reg128#11%top},[<ptr=int32#3,: 128]
  8011. # asm 2: vld1.8 {>_0x1000000=d20->_0x1000000=d21},[<ptr=r2,: 128]
  8012. vld1.8 {d20-d21},[r2,: 128]
  8013. # qhasm: 2x t1 = h15 + _0x1000000
  8014. # asm 1: vadd.i64 >t1=reg128#12,<h15=reg128#6,<_0x1000000=reg128#11
  8015. # asm 2: vadd.i64 >t1=q11,<h15=q5,<_0x1000000=q10
  8016. vadd.i64 q11,q5,q10
  8017. # qhasm: 2x h04 -= t0
  8018. # asm 1: vsub.i64 >h04=reg128#3,<h04=reg128#3,<t0=reg128#10
  8019. # asm 2: vsub.i64 >h04=q2,<h04=q2,<t0=q9
  8020. vsub.i64 q2,q2,q9
# qhasm: 2x c = t1 signed>> 25
# asm 1: vshr.s64 >c=reg128#10,<t1=reg128#12,#25
# asm 2: vshr.s64 >c=q9,<t1=q11,#25
vshr.s64 q9,q11,#25
# qhasm: h48 = h04[2,3]h48[2,3]
# asm 1: vext.32 <h48=reg128#7%bot,<h04=reg128#3%top,<h04=reg128#3%bot,#0
# asm 2: vext.32 <h48=d12,<h04=d5,<h04=d4,#0
vext.32 d12,d5,d4,#0
# qhasm: t1 &= mask25
# asm 1: vand >t1=reg128#12,<t1=reg128#12,<mask25=reg128#5
# asm 2: vand >t1=q11,<t1=q11,<mask25=q4
vand q11,q11,q4
# qhasm: 2x h26 += c
# asm 1: vadd.i64 >h26=reg128#1,<h26=reg128#1,<c=reg128#10
# asm 2: vadd.i64 >h26=q0,<h26=q0,<c=q9
vadd.i64 q0,q0,q9
# qhasm: new h59
# qhasm: h59 = h59[0]h19[1]
# asm 1: vmov <h59=reg128#10%top,<h19=reg128#4%top
# asm 2: vmov <h59=d19,<h19=d7
vmov d19,d7
# qhasm: 2x t0 = h26 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#4,<h26=reg128#1,<_0x2000000=reg128#8
# asm 2: vadd.i64 >t0=q3,<h26=q0,<_0x2000000=q7
vadd.i64 q3,q0,q7
# qhasm: 2x h15 -= t1
# asm 1: vsub.i64 >h15=reg128#6,<h15=reg128#6,<t1=reg128#12
# asm 2: vsub.i64 >h15=q5,<h15=q5,<t1=q11
vsub.i64 q5,q5,q11
# qhasm: 2x c = t0 signed>> 26
# asm 1: vshr.s64 >c=reg128#12,<t0=reg128#4,#26
# asm 2: vshr.s64 >c=q11,<t0=q3,#26
vshr.s64 q11,q3,#26
# qhasm: h59 = h15[2,3]h59[2,3]
# asm 1: vext.32 <h59=reg128#10%bot,<h15=reg128#6%top,<h15=reg128#6%bot,#0
# asm 2: vext.32 <h59=d18,<h15=d11,<h15=d10,#0
vext.32 d18,d11,d10,#0
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#4,<t0=reg128#4,<mask26=reg128#2
# asm 2: vand >t0=q3,<t0=q3,<mask26=q1
vand q3,q3,q1
# qhasm: 2x h37 += c
# asm 1: vadd.i64 >h37=reg128#9,<h37=reg128#9,<c=reg128#12
# asm 2: vadd.i64 >h37=q8,<h37=q8,<c=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = h37 + _0x1000000
# asm 1: vadd.i64 >t1=reg128#12,<h37=reg128#9,<_0x1000000=reg128#11
# asm 2: vadd.i64 >t1=q11,<h37=q8,<_0x1000000=q10
vadd.i64 q11,q8,q10
# qhasm: 2x h26 -= t0
# asm 1: vsub.i64 >h26=reg128#1,<h26=reg128#1,<t0=reg128#4
# asm 2: vsub.i64 >h26=q0,<h26=q0,<t0=q3
vsub.i64 q0,q0,q3
# qhasm: 2x c = t1 signed>> 25
# asm 1: vshr.s64 >c=reg128#4,<t1=reg128#12,#25
# asm 2: vshr.s64 >c=q3,<t1=q11,#25
vshr.s64 q3,q11,#25
# qhasm: t1 &= mask25
# asm 1: vand >t1=reg128#12,<t1=reg128#12,<mask25=reg128#5
# asm 2: vand >t1=q11,<t1=q11,<mask25=q4
vand q11,q11,q4
# qhasm: 2x h48 += c
# asm 1: vadd.i64 >h48=reg128#4,<h48=reg128#7,<c=reg128#4
# asm 2: vadd.i64 >h48=q3,<h48=q6,<c=q3
vadd.i64 q3,q6,q3
# qhasm: 2x t0 = h48 + _0x2000000
# asm 1: vadd.i64 >t0=reg128#7,<h48=reg128#4,<_0x2000000=reg128#8
# asm 2: vadd.i64 >t0=q6,<h48=q3,<_0x2000000=q7
vadd.i64 q6,q3,q7
# qhasm: 2x h37 -= t1
# asm 1: vsub.i64 >h37=reg128#9,<h37=reg128#9,<t1=reg128#12
# asm 2: vsub.i64 >h37=q8,<h37=q8,<t1=q11
vsub.i64 q8,q8,q11
# qhasm: 2x c = t0 signed>> 26
# asm 1: vshr.s64 >c=reg128#12,<t0=reg128#7,#26
# asm 2: vshr.s64 >c=q11,<t0=q6,#26
vshr.s64 q11,q6,#26
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#7,<t0=reg128#7,<mask26=reg128#2
# asm 2: vand >t0=q6,<t0=q6,<mask26=q1
vand q6,q6,q1
# qhasm: 2x h59 += c
# asm 1: vadd.i64 >h59=reg128#10,<h59=reg128#10,<c=reg128#12
# asm 2: vadd.i64 >h59=q9,<h59=q9,<c=q11
vadd.i64 q9,q9,q11
# qhasm: new t
# qhasm: t = t[0], h59[1] + _0x1000000[1]
# asm 1: vadd.i64 <t=reg128#13%top,<h59=reg128#10%top,<_0x1000000=reg128#11%top
# asm 2: vadd.i64 <t=d25,<h59=d19,<_0x1000000=d21
vadd.i64 d25,d19,d21
# qhasm: 2x h48 -= t0
# asm 1: vsub.i64 >h48=reg128#4,<h48=reg128#4,<t0=reg128#7
# asm 2: vsub.i64 >h48=q3,<h48=q3,<t0=q6
vsub.i64 q3,q3,q6
# qhasm: c = c[0],t[1] signed>> 25
# asm 1: vshr.s64 <c=reg128#12%top,<t=reg128#13%top,#25
# asm 2: vshr.s64 <c=d23,<t=d25,#25
vshr.s64 d23,d25,#25
# qhasm: t &= mask25
# asm 1: vand >t=reg128#5,<t=reg128#13,<mask25=reg128#5
# asm 2: vand >t=q4,<t=q12,<mask25=q4
vand q4,q12,q4
# qhasm: new s2
# qhasm: s2 = s2[0],c[1] + c[1]
# asm 1: vadd.i64 <s2=reg128#11%top,<c=reg128#12%top,<c=reg128#12%top
# asm 2: vadd.i64 <s2=d21,<c=d23,<c=d23
vadd.i64 d21,d23,d23
# qhasm: new s
# qhasm: s = s[0],c[1] << 4
# asm 1: vshl.i64 <s=reg128#13%top,<c=reg128#12%top,#4
# asm 2: vshl.i64 <s=d25,<c=d23,#4
vshl.i64 d25,d23,#4
# qhasm: s2 = s2[0],s2[1] + c[1]
# asm 1: vadd.i64 <s2=reg128#11%top,<s2=reg128#11%top,<c=reg128#12%top
# asm 2: vadd.i64 <s2=d21,<s2=d21,<c=d23
vadd.i64 d21,d21,d23
# qhasm: s = s[0],s[1] + s2[1]
# asm 1: vadd.i64 <s=reg128#13%top,<s=reg128#13%top,<s2=reg128#11%top
# asm 2: vadd.i64 <s=d25,<s=d25,<s2=d21
vadd.i64 d25,d25,d21
# qhasm: h04 = h04[0] + s[1],h04[1]
# asm 1: vadd.i64 <h04=reg128#3%bot,<h04=reg128#3%bot,<s=reg128#13%top
# asm 2: vadd.i64 <h04=d4,<h04=d4,<s=d25
vadd.i64 d4,d4,d25
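# Note: d23 holds the carry c out of the top 25-bit limb h9. The sequence
# s2 = c + c = 2c, s = c << 4 = 16c, s2 = 3c, s = s + s2 = 19c computes 19*c
# with adds and one shift, and the final vadd folds 19*c into the bottom
# limb h0: the reduction uses 2^255 = 19 (mod 2^255 - 19).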
# qhasm: h26[0,1,2,3] h37[0,1,2,3] = h26[0]h37[0]h26[1]h37[1] h26[2]h37[2]h26[3]h37[3]
# asm 1: vzip.i32 <h26=reg128#1,<h37=reg128#9
# asm 2: vzip.i32 <h26=q0,<h37=q8
vzip.i32 q0,q8
# qhasm: t0 = h04[0] + _0x2000000[0],t0[1]
# asm 1: vadd.i64 <t0=reg128#7%bot,<h04=reg128#3%bot,<_0x2000000=reg128#8%bot
# asm 2: vadd.i64 <t0=d12,<h04=d4,<_0x2000000=d14
vadd.i64 d12,d4,d14
# qhasm: posh += 8
# asm 1: add >posh=int32#3,<posh=int32#7,#8
# asm 2: add >posh=r2,<posh=r6,#8
add r2,r6,#8
# qhasm: mem64[posh] aligned= h26[0]
# asm 1: vst1.8 <h26=reg128#1%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h26=d0,[<posh=r2,: 64]
vst1.8 d0,[r2,: 64]
# qhasm: h59 = h59[0],h59[1] - t[1]
# asm 1: vsub.i64 <h59=reg128#10%top,<h59=reg128#10%top,<t=reg128#5%top
# asm 2: vsub.i64 <h59=d19,<h59=d19,<t=d9
vsub.i64 d19,d19,d9
# qhasm: posh += 16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: mem64[posh] aligned= h37[0]
# asm 1: vst1.8 <h37=reg128#9%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h37=d16,[<posh=r2,: 64]
vst1.8 d16,[r2,: 64]
# qhasm: c = t0[0] signed>> 26,c[1]
# asm 1: vshr.s64 <c=reg128#12%bot,<t0=reg128#7%bot,#26
# asm 2: vshr.s64 <c=d22,<t0=d12,#26
vshr.s64 d22,d12,#26
# qhasm: t0 &= mask26
# asm 1: vand >t0=reg128#1,<t0=reg128#7,<mask26=reg128#2
# asm 2: vand >t0=q0,<t0=q6,<mask26=q1
vand q0,q6,q1
# qhasm: h15 = h15[0] + c[0],h15[1]
# asm 1: vadd.i64 <h15=reg128#6%bot,<h15=reg128#6%bot,<c=reg128#12%bot
# asm 2: vadd.i64 <h15=d10,<h15=d10,<c=d22
vadd.i64 d10,d10,d22
# qhasm: h48[0,1,2,3] h59[0,1,2,3] = h48[0]h59[0]h48[1]h59[1] h48[2]h59[2]h48[3]h59[3]
# asm 1: vzip.i32 <h48=reg128#4,<h59=reg128#10
# asm 2: vzip.i32 <h48=q3,<h59=q9
vzip.i32 q3,q9
# qhasm: h04 = h04[0] - t0[0],h04[1]
# asm 1: vsub.i64 <h04=reg128#3%bot,<h04=reg128#3%bot,<t0=reg128#1%bot
# asm 2: vsub.i64 <h04=d4,<h04=d4,<t0=d0
vsub.i64 d4,d4,d0
# qhasm: posh -= 8
# asm 1: sub >posh=int32#3,<posh=int32#3,#8
# asm 2: sub >posh=r2,<posh=r2,#8
sub r2,r2,#8
# qhasm: mem64[posh] aligned= h48[0]
# asm 1: vst1.8 <h48=reg128#4%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h48=d6,[<posh=r2,: 64]
vst1.8 d6,[r2,: 64]
# qhasm: posh += 16
# asm 1: add >posh=int32#3,<posh=int32#3,#16
# asm 2: add >posh=r2,<posh=r2,#16
add r2,r2,#16
# qhasm: mem64[posh] aligned= h59[0]
# asm 1: vst1.8 <h59=reg128#10%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h59=d18,[<posh=r2,: 64]
vst1.8 d18,[r2,: 64]
# qhasm: h04[0,1,2,3] h15[0,1,2,3] = h04[0]h15[0]h04[1]h15[1] h04[2]h15[2]h04[3]h15[3]
# asm 1: vzip.i32 <h04=reg128#3,<h15=reg128#6
# asm 2: vzip.i32 <h04=q2,<h15=q5
vzip.i32 q2,q5
# qhasm: posh -= 32
# asm 1: sub >posh=int32#3,<posh=int32#3,#32
# asm 2: sub >posh=r2,<posh=r2,#32
sub r2,r2,#32
# qhasm: mem64[posh] aligned= h04[0]
# asm 1: vst1.8 <h04=reg128#3%bot,[<posh=int32#3,: 64]
# asm 2: vst1.8 <h04=d4,[<posh=r2,: 64]
vst1.8 d4,[r2,: 64]
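# Note: the vzip.i32 steps interleave the paired limb vectors so that each
# low d-register holds two consecutive 32-bit limbs (d0 = h2h3, d16 = h6h7,
# d6 = h4h5, d18 = h8h9, d4 = h0h1), letting the reduced ten-limb result be
# written back with five aligned 64-bit stores covering posh .. posh+32.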
# qhasm: =? postcopy - 0
# asm 1: cmp <postcopy=int32#5,#0
# asm 2: cmp <postcopy=r4,#0
cmp r4,#0
# qhasm: goto skippostcopy if =
beq ._skippostcopy
# qhasm: posy = playground1_ptr + 144
# asm 1: add >posy=int32#3,<playground1_ptr=int32#4,#144
# asm 2: add >posy=r2,<playground1_ptr=r3,#144
add r2,r3,#144
# qhasm: posx = postcopy
# asm 1: mov >posx=int32#5,<postcopy=int32#5
# asm 2: mov >posx=r4,<postcopy=r4
mov r4,r4
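# Note: posx and postcopy were assigned the same register (r4), so this
# qhasm-level move assembles to a no-op.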
# qhasm: f0 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<posy=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: f4 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<posy=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: new f8
# qhasm: f8 aligned= mem64[posy] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<posy=int32#3,: 64]
# asm 2: vld1.8 {<f8=d4},[<posy=r2,: 64]
vld1.8 {d4},[r2,: 64]
# qhasm: mem128[posx] aligned= f0;posx += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<posx=int32#5,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<posx=r4,: 128]!
vst1.8 {d0-d1},[r4,: 128]!
# qhasm: mem128[posx] aligned= f4;posx += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<posx=int32#5,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<posx=r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
# qhasm: mem64[posx] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<posx=int32#5,: 64]
# asm 2: vst1.8 <f8=d4,[<posx=r4,: 64]
vst1.8 d4,[r4,: 64]
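# Note: when postcopy is nonzero, the 40-byte field element at
# playground1_ptr + 144 (two 16-byte vectors plus one 8-byte tail) is
# copied out to the postcopy address before the loop continues.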
# qhasm: skippostcopy:
._skippostcopy:
# qhasm: =? i - 1
# asm 1: cmp <i=int32#2,#1
# asm 2: cmp <i=r1,#1
cmp r1,#1
# qhasm: goto skipfinalcopy if !=
bne ._skipfinalcopy
# qhasm: posy = playground1_ptr + 288
# asm 1: add >posy=int32#3,<playground1_ptr=int32#4,#288
# asm 2: add >posy=r2,<playground1_ptr=r3,#288
add r2,r3,#288
# qhasm: posx = playground1_ptr + 144
# asm 1: add >posx=int32#5,<playground1_ptr=int32#4,#144
# asm 2: add >posx=r4,<playground1_ptr=r3,#144
add r4,r3,#144
# qhasm: f0 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f0=reg128#1%bot->f0=reg128#1%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f0=d0->f0=d1},[<posy=r2,: 128]!
vld1.8 {d0-d1},[r2,: 128]!
# qhasm: f4 aligned= mem128[posy];posy += 16
# asm 1: vld1.8 {>f4=reg128#2%bot->f4=reg128#2%top},[<posy=int32#3,: 128]!
# asm 2: vld1.8 {>f4=d2->f4=d3},[<posy=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: new f8
# qhasm: f8 aligned= mem64[posy] f8[1]
# asm 1: vld1.8 {<f8=reg128#3%bot},[<posy=int32#3,: 64]
# asm 2: vld1.8 {<f8=d4},[<posy=r2,: 64]
vld1.8 {d4},[r2,: 64]
# qhasm: mem128[posx] aligned= f0;posx += 16
# asm 1: vst1.8 {<f0=reg128#1%bot-<f0=reg128#1%top},[<posx=int32#5,: 128]!
# asm 2: vst1.8 {<f0=d0-<f0=d1},[<posx=r4,: 128]!
vst1.8 {d0-d1},[r4,: 128]!
# qhasm: mem128[posx] aligned= f4;posx += 16
# asm 1: vst1.8 {<f4=reg128#2%bot-<f4=reg128#2%top},[<posx=int32#5,: 128]!
# asm 2: vst1.8 {<f4=d2-<f4=d3},[<posx=r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
# qhasm: mem64[posx] aligned= f8[0]
# asm 1: vst1.8 <f8=reg128#3%bot,[<posx=int32#5,: 64]
# asm 2: vst1.8 <f8=d4,[<posx=r4,: 64]
vst1.8 d4,[r4,: 64]
# qhasm: skipfinalcopy:
._skipfinalcopy:
# qhasm: i += 1
# asm 1: add >i=int32#2,<i=int32#2,#1
# asm 2: add >i=r1,<i=r1,#1
add r1,r1,#1
# qhasm: unsigned<? i - 12
# asm 1: cmp <i=int32#2,#12
# asm 2: cmp <i=r1,#12
cmp r1,#12
# qhasm: goto invertloop if unsigned<
blo ._invertloop
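# Note: this is the back edge of the inversion loop; i counts up to 12,
# each pass presumably performing one stage of the addition chain that
# raises the field element to the power p - 2 = 2^255 - 21 (Fermat
# inversion), after which the reciprocal sits at playground1_ptr + 144.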
# qhasm: posf = playground1_ptr + 144
# asm 1: add >posf=int32#2,<playground1_ptr=int32#4,#144
# asm 2: add >posf=r1,<playground1_ptr=r3,#144
add r1,r3,#144
# qhasm: out0 = mem32[posf];posf += 4
# asm 1: ldr >out0=int32#3,[<posf=int32#2],#4
# asm 2: ldr >out0=r2,[<posf=r1],#4
ldr r2,[r1],#4
# qhasm: out1 = mem32[posf];posf += 4
# asm 1: ldr >out1=int32#4,[<posf=int32#2],#4
# asm 2: ldr >out1=r3,[<posf=r1],#4
ldr r3,[r1],#4
# qhasm: out2 = mem32[posf];posf += 4
# asm 1: ldr >out2=int32#5,[<posf=int32#2],#4
# asm 2: ldr >out2=r4,[<posf=r1],#4
ldr r4,[r1],#4
# qhasm: out3 = mem32[posf];posf += 4
# asm 1: ldr >out3=int32#6,[<posf=int32#2],#4
# asm 2: ldr >out3=r5,[<posf=r1],#4
ldr r5,[r1],#4
# qhasm: out4 = mem32[posf];posf += 4
# asm 1: ldr >out4=int32#7,[<posf=int32#2],#4
# asm 2: ldr >out4=r6,[<posf=r1],#4
ldr r6,[r1],#4
# qhasm: out5 = mem32[posf];posf += 4
# asm 1: ldr >out5=int32#8,[<posf=int32#2],#4
# asm 2: ldr >out5=r7,[<posf=r1],#4
ldr r7,[r1],#4
# qhasm: out6 = mem32[posf];posf += 4
# asm 1: ldr >out6=int32#9,[<posf=int32#2],#4
# asm 2: ldr >out6=r8,[<posf=r1],#4
ldr r8,[r1],#4
# qhasm: out7 = mem32[posf];posf += 4
# asm 1: ldr >out7=int32#10,[<posf=int32#2],#4
# asm 2: ldr >out7=r9,[<posf=r1],#4
ldr r9,[r1],#4
# qhasm: out8 = mem32[posf];posf += 4
# asm 1: ldr >out8=int32#11,[<posf=int32#2],#4
# asm 2: ldr >out8=r10,[<posf=r1],#4
ldr r10,[r1],#4
# qhasm: out9 = mem32[posf]
# asm 1: ldr >out9=int32#2,[<posf=int32#2]
# asm 2: ldr >out9=r1,[<posf=r1]
ldr r1,[r1]
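# Note: out0..out9 are the ten limbs of the result in radix 2^25.5 (limbs
# alternate between 26 and 25 bits). The block below is the standard
# "freeze" used by reference implementations: it computes
# q = (19*out9 + 2^24) >> 25, cascades q through every limb with
# alternating signed >>26 / >>25 shifts, and the final carry is 1 exactly
# when the value is at least 2^255 - 19, so adding 19*carry and carrying
# once more yields the canonical reduced representative.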
# qhasm: carry = out9 + (out9 << 4)
# asm 1: add >carry=int32#12,<out9=int32#2,<out9=int32#2,LSL #4
# asm 2: add >carry=r11,<out9=r1,<out9=r1,LSL #4
add r11,r1,r1,LSL #4
# qhasm: carry = carry + (out9 << 1)
# asm 1: add >carry=int32#12,<carry=int32#12,<out9=int32#2,LSL #1
# asm 2: add >carry=r11,<carry=r11,<out9=r1,LSL #1
add r11,r11,r1,LSL #1
# qhasm: carry += 16777216
# asm 1: add >carry=int32#12,<carry=int32#12,#16777216
# asm 2: add >carry=r11,<carry=r11,#16777216
add r11,r11,#16777216
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: carry += out0
# asm 1: add >carry=int32#12,<carry=int32#12,<out0=int32#3
# asm 2: add >carry=r11,<carry=r11,<out0=r2
add r11,r11,r2
# qhasm: carry signed>>= 26
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #26
# asm 2: mov >carry=r11,<carry=r11,ASR #26
mov r11,r11,ASR #26
# qhasm: carry += out1
# asm 1: add >carry=int32#12,<carry=int32#12,<out1=int32#4
# asm 2: add >carry=r11,<carry=r11,<out1=r3
add r11,r11,r3
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: carry += out2
# asm 1: add >carry=int32#12,<carry=int32#12,<out2=int32#5
# asm 2: add >carry=r11,<carry=r11,<out2=r4
add r11,r11,r4
# qhasm: carry signed>>= 26
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #26
# asm 2: mov >carry=r11,<carry=r11,ASR #26
mov r11,r11,ASR #26
# qhasm: carry += out3
# asm 1: add >carry=int32#12,<carry=int32#12,<out3=int32#6
# asm 2: add >carry=r11,<carry=r11,<out3=r5
add r11,r11,r5
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: carry += out4
# asm 1: add >carry=int32#12,<carry=int32#12,<out4=int32#7
# asm 2: add >carry=r11,<carry=r11,<out4=r6
add r11,r11,r6
# qhasm: carry signed>>= 26
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #26
# asm 2: mov >carry=r11,<carry=r11,ASR #26
mov r11,r11,ASR #26
# qhasm: carry += out5
# asm 1: add >carry=int32#12,<carry=int32#12,<out5=int32#8
# asm 2: add >carry=r11,<carry=r11,<out5=r7
add r11,r11,r7
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: carry += out6
# asm 1: add >carry=int32#12,<carry=int32#12,<out6=int32#9
# asm 2: add >carry=r11,<carry=r11,<out6=r8
add r11,r11,r8
# qhasm: carry signed>>= 26
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #26
# asm 2: mov >carry=r11,<carry=r11,ASR #26
mov r11,r11,ASR #26
# qhasm: carry += out7
# asm 1: add >carry=int32#12,<carry=int32#12,<out7=int32#10
# asm 2: add >carry=r11,<carry=r11,<out7=r9
add r11,r11,r9
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: carry += out8
# asm 1: add >carry=int32#12,<carry=int32#12,<out8=int32#11
# asm 2: add >carry=r11,<carry=r11,<out8=r10
add r11,r11,r10
# qhasm: carry signed>>= 26
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #26
# asm 2: mov >carry=r11,<carry=r11,ASR #26
mov r11,r11,ASR #26
# qhasm: carry += out9
# asm 1: add >carry=int32#12,<carry=int32#12,<out9=int32#2
# asm 2: add >carry=r11,<carry=r11,<out9=r1
add r11,r11,r1
# qhasm: carry signed>>= 25
# asm 1: mov >carry=int32#12,<carry=int32#12,ASR #25
# asm 2: mov >carry=r11,<carry=r11,ASR #25
mov r11,r11,ASR #25
# qhasm: out0 += carry
# asm 1: add >out0=int32#3,<out0=int32#3,<carry=int32#12
# asm 2: add >out0=r2,<out0=r2,<carry=r11
add r2,r2,r11
# qhasm: out0 += (carry << 1)
# asm 1: add >out0=int32#3,<out0=int32#3,<carry=int32#12,LSL #1
# asm 2: add >out0=r2,<out0=r2,<carry=r11,LSL #1
add r2,r2,r11,LSL #1
# qhasm: out0 += (carry << 4)
# asm 1: add >out0=int32#3,<out0=int32#3,<carry=int32#12,LSL #4
# asm 2: add >out0=r2,<out0=r2,<carry=r11,LSL #4
add r2,r2,r11,LSL #4
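# Note: 19 = 1 + 2 + 16, so the three adds above fold 19*carry into out0
# using only shifted adds, avoiding a multiply (the same decomposition
# used for 19*out9 at the top of this block). With 19*carry absorbed, the
# carry chain below normalizes each limb back into its 26- or 25-bit range.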
# qhasm: carry0 = (out0 signed>> 26)
# asm 1: mov >carry0=int32#12,<out0=int32#3,ASR #26
# asm 2: mov >carry0=r11,<out0=r2,ASR #26
mov r11,r2,ASR #26
# qhasm: out1 += carry0
# asm 1: add >out1=int32#4,<out1=int32#4,<carry0=int32#12
# asm 2: add >out1=r3,<out1=r3,<carry0=r11
add r3,r3,r11
# qhasm: out0 -= (carry0 << 26)
# asm 1: sub >out0=int32#3,<out0=int32#3,<carry0=int32#12,LSL #26
# asm 2: sub >out0=r2,<out0=r2,<carry0=r11,LSL #26
sub r2,r2,r11,LSL #26
# qhasm: carry1 = (out1 signed>> 25)
# asm 1: mov >carry1=int32#12,<out1=int32#4,ASR #25
# asm 2: mov >carry1=r11,<out1=r3,ASR #25
mov r11,r3,ASR #25
# qhasm: out2 += carry1
# asm 1: add >out2=int32#5,<out2=int32#5,<carry1=int32#12
# asm 2: add >out2=r4,<out2=r4,<carry1=r11
add r4,r4,r11
# qhasm: out1 -= (carry1 << 25)
# asm 1: sub >out1=int32#4,<out1=int32#4,<carry1=int32#12,LSL #25
# asm 2: sub >out1=r3,<out1=r3,<carry1=r11,LSL #25
sub r3,r3,r11,LSL #25
# qhasm: carry2 = (out2 signed>> 26)
# asm 1: mov >carry2=int32#12,<out2=int32#5,ASR #26
# asm 2: mov >carry2=r11,<out2=r4,ASR #26
mov r11,r4,ASR #26
# qhasm: out3 += carry2
# asm 1: add >out3=int32#6,<out3=int32#6,<carry2=int32#12
# asm 2: add >out3=r5,<out3=r5,<carry2=r11
add r5,r5,r11
# qhasm: out2 -= (carry2 << 26)
# asm 1: sub >out2=int32#5,<out2=int32#5,<carry2=int32#12,LSL #26
# asm 2: sub >out2=r4,<out2=r4,<carry2=r11,LSL #26
sub r4,r4,r11,LSL #26
# qhasm: carry3 = (out3 signed>> 25)
# asm 1: mov >carry3=int32#12,<out3=int32#6,ASR #25
# asm 2: mov >carry3=r11,<out3=r5,ASR #25
mov r11,r5,ASR #25
# qhasm: out4 += carry3
# asm 1: add >out4=int32#7,<out4=int32#7,<carry3=int32#12
# asm 2: add >out4=r6,<out4=r6,<carry3=r11
add r6,r6,r11
# qhasm: out3 -= (carry3 << 25)
# asm 1: sub >out3=int32#6,<out3=int32#6,<carry3=int32#12,LSL #25
# asm 2: sub >out3=r5,<out3=r5,<carry3=r11,LSL #25
sub r5,r5,r11,LSL #25
# qhasm: carry4 = (out4 signed>> 26)
# asm 1: mov >carry4=int32#12,<out4=int32#7,ASR #26
# asm 2: mov >carry4=r11,<out4=r6,ASR #26
mov r11,r6,ASR #26
# qhasm: out5 += carry4
# asm 1: add >out5=int32#8,<out5=int32#8,<carry4=int32#12
# asm 2: add >out5=r7,<out5=r7,<carry4=r11
add r7,r7,r11
# qhasm: out4 -= (carry4 << 26)
# asm 1: sub >out4=int32#7,<out4=int32#7,<carry4=int32#12,LSL #26
# asm 2: sub >out4=r6,<out4=r6,<carry4=r11,LSL #26
sub r6,r6,r11,LSL #26
# qhasm: carry5 = (out5 signed>> 25)
# asm 1: mov >carry5=int32#12,<out5=int32#8,ASR #25
# asm 2: mov >carry5=r11,<out5=r7,ASR #25
mov r11,r7,ASR #25
# qhasm: out6 += carry5
# asm 1: add >out6=int32#9,<out6=int32#9,<carry5=int32#12
# asm 2: add >out6=r8,<out6=r8,<carry5=r11
add r8,r8,r11
# qhasm: out5 -= (carry5 << 25)
# asm 1: sub >out5=int32#8,<out5=int32#8,<carry5=int32#12,LSL #25
# asm 2: sub >out5=r7,<out5=r7,<carry5=r11,LSL #25
sub r7,r7,r11,LSL #25
# qhasm: carry6 = (out6 signed>> 26)
# asm 1: mov >carry6=int32#12,<out6=int32#9,ASR #26
# asm 2: mov >carry6=r11,<out6=r8,ASR #26
mov r11,r8,ASR #26
# qhasm: out7 += carry6
# asm 1: add >out7=int32#10,<out7=int32#10,<carry6=int32#12
# asm 2: add >out7=r9,<out7=r9,<carry6=r11
add r9,r9,r11
# qhasm: out6 -= (carry6 << 26)
# asm 1: sub >out6=int32#9,<out6=int32#9,<carry6=int32#12,LSL #26
# asm 2: sub >out6=r8,<out6=r8,<carry6=r11,LSL #26
sub r8,r8,r11,LSL #26
# qhasm: carry7 = (out7 signed>> 25)
# asm 1: mov >carry7=int32#12,<out7=int32#10,ASR #25
# asm 2: mov >carry7=r11,<out7=r9,ASR #25
mov r11,r9,ASR #25
# qhasm: out8 += carry7
# asm 1: add >out8=int32#11,<out8=int32#11,<carry7=int32#12
# asm 2: add >out8=r10,<out8=r10,<carry7=r11
add r10,r10,r11
# qhasm: out7 -= (carry7 << 25)
# asm 1: sub >out7=int32#10,<out7=int32#10,<carry7=int32#12,LSL #25
# asm 2: sub >out7=r9,<out7=r9,<carry7=r11,LSL #25
sub r9,r9,r11,LSL #25
# qhasm: carry8 = (out8 signed>> 26)
# asm 1: mov >carry8=int32#12,<out8=int32#11,ASR #26
# asm 2: mov >carry8=r11,<out8=r10,ASR #26
mov r11,r10,ASR #26
# qhasm: out9 += carry8
# asm 1: add >out9=int32#2,<out9=int32#2,<carry8=int32#12
# asm 2: add >out9=r1,<out9=r1,<carry8=r11
add r1,r1,r11
# qhasm: out8 -= (carry8 << 26)
# asm 1: sub >out8=int32#11,<out8=int32#11,<carry8=int32#12,LSL #26
# asm 2: sub >out8=r10,<out8=r10,<carry8=r11,LSL #26
sub r10,r10,r11,LSL #26
# qhasm: carry9 = (out9 signed>> 25)
# asm 1: mov >carry9=int32#12,<out9=int32#2,ASR #25
# asm 2: mov >carry9=r11,<out9=r1,ASR #25
mov r11,r1,ASR #25
# qhasm: out9 -= (carry9 << 25)
# asm 1: sub >out9=int32#2,<out9=int32#2,<carry9=int32#12,LSL #25
# asm 2: sub >out9=r1,<out9=r1,<carry9=r11,LSL #25
sub r1,r1,r11,LSL #25
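# Note: after this chain out0..out9 hold the unique reduced representative
# with limb widths 26,25,26,25,26,25,26,25,26,25 bits. The shifts below
# repack the ten limbs into eight contiguous 32-bit words: limb k starts at
# bit offset 26*ceil(k/2) + 25*floor(k/2) (out1 at bit 26, out2 at bit 51,
# out5 at bit 128, ...), so each word is assembled from the top of one limb
# and the bottom of the next.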
# qhasm: out0 += (out1 << 26)
# asm 1: add >out0=int32#3,<out0=int32#3,<out1=int32#4,LSL #26
# asm 2: add >out0=r2,<out0=r2,<out1=r3,LSL #26
add r2,r2,r3,LSL #26
# qhasm: out1 unsigned>>= 6
# asm 1: mov >out1=int32#4,<out1=int32#4,LSR #6
# asm 2: mov >out1=r3,<out1=r3,LSR #6
mov r3,r3,LSR #6
# qhasm: out1 += (out2 << 19)
# asm 1: add >out1=int32#4,<out1=int32#4,<out2=int32#5,LSL #19
# asm 2: add >out1=r3,<out1=r3,<out2=r4,LSL #19
add r3,r3,r4,LSL #19
# qhasm: out2 unsigned>>= 13
# asm 1: mov >out2=int32#5,<out2=int32#5,LSR #13
# asm 2: mov >out2=r4,<out2=r4,LSR #13
mov r4,r4,LSR #13
# qhasm: out2 += (out3 << 13)
# asm 1: add >out2=int32#5,<out2=int32#5,<out3=int32#6,LSL #13
# asm 2: add >out2=r4,<out2=r4,<out3=r5,LSL #13
add r4,r4,r5,LSL #13
# qhasm: out3 unsigned>>= 19
# asm 1: mov >out3=int32#6,<out3=int32#6,LSR #19
# asm 2: mov >out3=r5,<out3=r5,LSR #19
mov r5,r5,LSR #19
# qhasm: out3 += (out4 << 6)
# asm 1: add >out3=int32#6,<out3=int32#6,<out4=int32#7,LSL #6
# asm 2: add >out3=r5,<out3=r5,<out4=r6,LSL #6
add r5,r5,r6,LSL #6
# qhasm: out5 += (out6 << 25)
# asm 1: add >out5=int32#7,<out5=int32#8,<out6=int32#9,LSL #25
# asm 2: add >out5=r6,<out5=r7,<out6=r8,LSL #25
add r6,r7,r8,LSL #25
# qhasm: out6 unsigned>>= 7
# asm 1: mov >out6=int32#8,<out6=int32#9,LSR #7
# asm 2: mov >out6=r7,<out6=r8,LSR #7
mov r7,r8,LSR #7
# qhasm: out6 += (out7 << 19)
# asm 1: add >out6=int32#8,<out6=int32#8,<out7=int32#10,LSL #19
# asm 2: add >out6=r7,<out6=r7,<out7=r9,LSL #19
add r7,r7,r9,LSL #19
# qhasm: out7 unsigned>>= 13
# asm 1: mov >out7=int32#9,<out7=int32#10,LSR #13
# asm 2: mov >out7=r8,<out7=r9,LSR #13
mov r8,r9,LSR #13
# qhasm: out7 += (out8 << 12)
# asm 1: add >out7=int32#9,<out7=int32#9,<out8=int32#11,LSL #12
# asm 2: add >out7=r8,<out7=r8,<out8=r10,LSL #12
add r8,r8,r10,LSL #12
# qhasm: out8 unsigned>>= 20
# asm 1: mov >out8=int32#10,<out8=int32#11,LSR #20
# asm 2: mov >out8=r9,<out8=r10,LSR #20
mov r9,r10,LSR #20
# qhasm: out8 += (out9 << 6)
# asm 1: add >out8=int32#2,<out8=int32#10,<out9=int32#2,LSL #6
# asm 2: add >out8=r1,<out8=r9,<out9=r1,LSL #6
add r1,r9,r1,LSL #6
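# Note: the limb boundaries line up with the word boundaries twice: out4
# ends exactly at bit 128, so it is absorbed entirely into the out3 word
# and needs no stored word of its own, and out9's 25 bits end at bit 255,
# leaving the top bit of the last word clear.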
# qhasm: mem32[q] = out0;q += 4
# asm 1: str <out0=int32#3,[<q=int32#1],#4
# asm 2: str <out0=r2,[<q=r0],#4
str r2,[r0],#4
# qhasm: mem32[q] = out1;q += 4
# asm 1: str <out1=int32#4,[<q=int32#1],#4
# asm 2: str <out1=r3,[<q=r0],#4
str r3,[r0],#4
# qhasm: mem32[q] = out2;q += 4
# asm 1: str <out2=int32#5,[<q=int32#1],#4
# asm 2: str <out2=r4,[<q=r0],#4
str r4,[r0],#4
# qhasm: mem32[q] = out3;q += 4
# asm 1: str <out3=int32#6,[<q=int32#1],#4
# asm 2: str <out3=r5,[<q=r0],#4
str r5,[r0],#4
# qhasm: mem32[q] = out5;q += 4
# asm 1: str <out5=int32#7,[<q=int32#1],#4
# asm 2: str <out5=r6,[<q=r0],#4
str r6,[r0],#4
# qhasm: mem32[q] = out6;q += 4
# asm 1: str <out6=int32#8,[<q=int32#1],#4
# asm 2: str <out6=r7,[<q=r0],#4
str r7,[r0],#4
# qhasm: mem32[q] = out7;q += 4
# asm 1: str <out7=int32#9,[<q=int32#1],#4
# asm 2: str <out7=r8,[<q=r0],#4
str r8,[r0],#4
# qhasm: mem32[q] = out8
# asm 1: str <out8=int32#2,[<q=int32#1]
# asm 2: str <out8=r1,[<q=r0]
str r1,[r0]
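# Note: the eight words just stored are the 32-byte encoding of the
# scalarmult result, written through the output pointer q (little-endian
# byte order on a little-endian build).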
# qhasm: assign r4 r5 to caller_r4 caller_r5 = stack_r45
# asm 1: ldrd >caller_r4=int32#5,<stack_r45=stack64#1
# asm 2: ldrd >caller_r4=r4,<stack_r45=[sp,#0]
ldrd r4,[sp,#0]
# qhasm: assign r6 r7 to caller_r6 caller_r7 = stack_r67
# asm 1: ldrd >caller_r6=int32#7,<stack_r67=stack64#2
# asm 2: ldrd >caller_r6=r6,<stack_r67=[sp,#8]
ldrd r6,[sp,#8]
# qhasm: assign r8 r9 to caller_r8 caller_r9 = stack_r89
# asm 1: ldrd >caller_r8=int32#9,<stack_r89=stack64#3
# asm 2: ldrd >caller_r8=r8,<stack_r89=[sp,#16]
ldrd r8,[sp,#16]
# qhasm: assign r10 r11 to caller_r10 caller_r11 = stack_r1011
# asm 1: ldrd >caller_r10=int32#11,<stack_r1011=stack64#4
# asm 2: ldrd >caller_r10=r10,<stack_r1011=[sp,#24]
ldrd r10,[sp,#24]
# qhasm: caller_r12 = stack_r12
# asm 1: ldr >caller_r12=int32#13,<stack_r12=stack32#1
# asm 2: ldr >caller_r12=r12,<stack_r12=[sp,#480]
ldr r12,[sp,#480]
# qhasm: caller_r14 = stack_r14
# asm 1: ldr >caller_r14=int32#14,<stack_r14=stack32#2
# asm 2: ldr >caller_r14=r14,<stack_r14=[sp,#484]
ldr r14,[sp,#484]
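# Note: epilogue. The ldrd/ldr pairs restore the callee-saved registers
# r4-r11 plus r12 and r14 from the stack frame; r12 is reloaded with the
# value used below to restore sp, q4-q7 are popped, and the function
# returns 0 in r0.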
# qhasm: int32 result
# qhasm: result = 0
# asm 1: ldr >result=int32#1,=0
# asm 2: ldr >result=r0,=0
ldr r0,=0
# qhasm: qpopreturn result
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr