// instructions_0f.rs — handlers for 0F-prefixed (two-byte) x86 instructions.
  1. #![allow(non_snake_case, non_upper_case_globals, unused_variables)]
// External helpers implemented outside this module (host glue / support code).
// NOTE(review): the semantics live at the implementation site; the comments
// below state only what the call sites in this file demonstrate.
extern "C" {
    #[no_mangle]
    fn get_rand_int() -> i32;
    #[no_mangle]
    fn cpuid();
    // lar/lsl (0F02/0F03): first argument is a selector read from r/m,
    // second is the destination register's current value; the return value
    // is written back to the destination register.
    #[no_mangle]
    fn lsl(r: i32, v: i32) -> i32;
    #[no_mangle]
    fn lar(r: i32, v: i32) -> i32;
    // verw/verr (0F00 /5, /4): segment verification, called with a selector.
    #[no_mangle]
    fn verw(r: i32);
    #[no_mangle]
    fn verr(r: i32);
    // Load the task register / LDT register from the given selector.
    #[no_mangle]
    fn load_tr(v: i32);
    #[no_mangle]
    fn load_ldt(v: i32);
}
  20. unsafe fn undefined_instruction() {
  21. dbg_assert!(false, "Undefined instructions");
  22. trigger_ud()
  23. }
  24. unsafe fn unimplemented_sse() {
  25. dbg_assert!(false, "Unimplemented SSE instruction");
  26. trigger_ud()
  27. }
  28. use cpu::arith::{
  29. bsf16, bsf32, bsr16, bsr32, bt_mem, bt_reg, btc_mem, btc_reg, btr_mem, btr_reg, bts_mem,
  30. bts_reg, cmpxchg8, cmpxchg16, cmpxchg32, popcnt, shld16, shld32, shrd16, shrd32, xadd8, xadd16,
  31. xadd32,
  32. };
  33. use cpu::arith::{
  34. imul_reg16, imul_reg32, saturate_sd_to_sb, saturate_sd_to_sw, saturate_sd_to_ub,
  35. saturate_sw_to_sb, saturate_sw_to_ub, saturate_ud_to_ub, saturate_uw,
  36. };
  37. use cpu::cpu::*;
  38. use cpu::fpu::fpu_load_m32;
  39. use cpu::fpu::fpu_set_tag_word;
  40. use cpu::global_pointers::*;
  41. use cpu::misc_instr::{
  42. adjust_stack_reg, bswap, cmovcc16, cmovcc32, fxrstor, fxsave, get_stack_pointer, jmpcc16,
  43. jmpcc32, push16, push32, setcc_mem, setcc_reg, test_b, test_be, test_l, test_le, test_o,
  44. test_p, test_s, test_z,
  45. };
  46. use cpu::misc_instr::{lss16, lss32};
  47. use cpu::sse_instr::*;
  48. #[no_mangle]
  49. pub unsafe fn instr16_0F00_0_mem(addr: i32) {
  50. // sldt
  51. if !*protected_mode || vm86_mode() {
  52. trigger_ud();
  53. return;
  54. }
  55. return_on_pagefault!(safe_write16(addr, *sreg.offset(LDTR as isize) as i32));
  56. }
  57. #[no_mangle]
  58. pub unsafe fn instr32_0F00_0_mem(addr: i32) { instr16_0F00_0_mem(addr) }
  59. #[no_mangle]
  60. pub unsafe fn instr16_0F00_0_reg(r: i32) {
  61. if !*protected_mode || vm86_mode() {
  62. trigger_ud();
  63. return;
  64. }
  65. write_reg16(r, *sreg.offset(LDTR as isize) as i32);
  66. }
  67. #[no_mangle]
  68. pub unsafe fn instr32_0F00_0_reg(r: i32) {
  69. if !*protected_mode || vm86_mode() {
  70. trigger_ud();
  71. return;
  72. }
  73. write_reg32(r, *sreg.offset(LDTR as isize) as i32);
  74. }
  75. #[no_mangle]
  76. pub unsafe fn instr16_0F00_1_mem(addr: i32) {
  77. // str
  78. if !*protected_mode || vm86_mode() {
  79. trigger_ud();
  80. return;
  81. }
  82. return_on_pagefault!(safe_write16(addr, *sreg.offset(TR as isize) as i32));
  83. }
  84. #[no_mangle]
  85. pub unsafe fn instr32_0F00_1_mem(addr: i32) { instr16_0F00_1_mem(addr) }
  86. #[no_mangle]
  87. pub unsafe fn instr16_0F00_1_reg(r: i32) {
  88. if !*protected_mode || vm86_mode() {
  89. trigger_ud();
  90. return;
  91. }
  92. write_reg16(r, *sreg.offset(TR as isize) as i32);
  93. }
  94. #[no_mangle]
  95. pub unsafe fn instr32_0F00_1_reg(r: i32) {
  96. if !*protected_mode || vm86_mode() {
  97. trigger_ud();
  98. return;
  99. }
  100. write_reg32(r, *sreg.offset(TR as isize) as i32);
  101. }
  102. #[no_mangle]
  103. pub unsafe fn instr16_0F00_2_mem(addr: i32) {
  104. // lldt
  105. if !*protected_mode || vm86_mode() {
  106. trigger_ud();
  107. }
  108. else if 0 != *cpl {
  109. trigger_gp(0);
  110. }
  111. else {
  112. load_ldt(return_on_pagefault!(safe_read16(addr)));
  113. };
  114. }
  115. #[no_mangle]
  116. pub unsafe fn instr32_0F00_2_mem(addr: i32) { instr16_0F00_2_mem(addr) }
  117. #[no_mangle]
  118. pub unsafe fn instr16_0F00_2_reg(r: i32) {
  119. if !*protected_mode || vm86_mode() {
  120. trigger_ud();
  121. }
  122. else if 0 != *cpl {
  123. trigger_gp(0);
  124. }
  125. else {
  126. load_ldt(read_reg16(r));
  127. };
  128. }
  129. #[no_mangle]
  130. pub unsafe fn instr32_0F00_2_reg(r: i32) { instr16_0F00_2_reg(r) }
  131. #[no_mangle]
  132. pub unsafe fn instr16_0F00_3_mem(addr: i32) {
  133. // ltr
  134. if !*protected_mode || vm86_mode() {
  135. trigger_ud();
  136. }
  137. else if 0 != *cpl {
  138. trigger_gp(0);
  139. }
  140. else {
  141. load_tr(return_on_pagefault!(safe_read16(addr)));
  142. };
  143. }
  144. #[no_mangle]
  145. pub unsafe fn instr32_0F00_3_mem(addr: i32) { instr16_0F00_3_mem(addr); }
  146. #[no_mangle]
  147. pub unsafe fn instr16_0F00_3_reg(r: i32) {
  148. if !*protected_mode || vm86_mode() {
  149. trigger_ud();
  150. }
  151. else if 0 != *cpl {
  152. trigger_gp(0);
  153. }
  154. else {
  155. load_tr(read_reg16(r));
  156. };
  157. }
  158. #[no_mangle]
  159. pub unsafe fn instr32_0F00_3_reg(r: i32) { instr16_0F00_3_reg(r) }
  160. #[no_mangle]
  161. pub unsafe fn instr16_0F00_4_mem(addr: i32) {
  162. if !*protected_mode || vm86_mode() {
  163. dbg_log!("verr #ud");
  164. trigger_ud();
  165. return;
  166. }
  167. verr(return_on_pagefault!(safe_read16(addr)));
  168. }
  169. #[no_mangle]
  170. pub unsafe fn instr32_0F00_4_mem(addr: i32) { instr16_0F00_4_mem(addr) }
  171. #[no_mangle]
  172. pub unsafe fn instr16_0F00_4_reg(r: i32) {
  173. if !*protected_mode || vm86_mode() {
  174. dbg_log!("verr #ud");
  175. trigger_ud();
  176. return;
  177. }
  178. verr(read_reg16(r));
  179. }
  180. #[no_mangle]
  181. pub unsafe fn instr32_0F00_4_reg(r: i32) { instr16_0F00_4_reg(r) }
  182. #[no_mangle]
  183. pub unsafe fn instr16_0F00_5_mem(addr: i32) {
  184. if !*protected_mode || vm86_mode() {
  185. dbg_log!("verw #ud");
  186. trigger_ud();
  187. return;
  188. }
  189. verw(return_on_pagefault!(safe_read16(addr)));
  190. }
  191. #[no_mangle]
  192. pub unsafe fn instr32_0F00_5_mem(addr: i32) { instr16_0F00_5_mem(addr) }
  193. #[no_mangle]
  194. pub unsafe fn instr16_0F00_5_reg(r: i32) {
  195. if !*protected_mode || vm86_mode() {
  196. dbg_log!("verw #ud");
  197. trigger_ud();
  198. return;
  199. }
  200. verw(read_reg16(r));
  201. }
  202. #[no_mangle]
  203. pub unsafe fn instr32_0F00_5_reg(r: i32) { instr16_0F00_5_reg(r) }
  204. #[no_mangle]
  205. pub unsafe fn instr16_0F01_0_reg(r: i32) { trigger_ud(); }
  206. #[no_mangle]
  207. pub unsafe fn instr32_0F01_0_reg(r: i32) { trigger_ud(); }
  208. unsafe fn sgdt(addr: i32, mask: i32) {
  209. return_on_pagefault!(writable_or_pagefault(addr, 6));
  210. safe_write16(addr, *gdtr_size).unwrap();
  211. safe_write32(addr + 2, *gdtr_offset & mask).unwrap();
  212. }
  213. #[no_mangle]
  214. pub unsafe fn instr16_0F01_0_mem(addr: i32) { sgdt(addr, 0xFFFFFF) }
  215. #[no_mangle]
  216. pub unsafe fn instr32_0F01_0_mem(addr: i32) { sgdt(addr, -1) }
  217. #[no_mangle]
  218. pub unsafe fn instr16_0F01_1_reg(r: i32) { trigger_ud(); }
  219. #[no_mangle]
  220. pub unsafe fn instr32_0F01_1_reg(r: i32) { trigger_ud(); }
  221. unsafe fn sidt(addr: i32, mask: i32) {
  222. return_on_pagefault!(writable_or_pagefault(addr, 6));
  223. safe_write16(addr, *idtr_size).unwrap();
  224. safe_write32(addr + 2, *idtr_offset & mask).unwrap();
  225. }
  226. #[no_mangle]
  227. pub unsafe fn instr16_0F01_1_mem(addr: i32) { sidt(addr, 0xFFFFFF) }
  228. #[no_mangle]
  229. pub unsafe fn instr32_0F01_1_mem(addr: i32) { sidt(addr, -1) }
  230. #[no_mangle]
  231. pub unsafe fn instr16_0F01_2_reg(r: i32) { trigger_ud(); }
  232. #[no_mangle]
  233. pub unsafe fn instr32_0F01_2_reg(r: i32) { trigger_ud(); }
  234. unsafe fn lgdt(addr: i32, mask: i32) {
  235. if 0 != *cpl {
  236. trigger_gp(0);
  237. return;
  238. }
  239. let size = return_on_pagefault!(safe_read16(addr));
  240. let offset = return_on_pagefault!(safe_read32s(addr + 2));
  241. *gdtr_size = size;
  242. *gdtr_offset = offset & mask;
  243. }
  244. #[no_mangle]
  245. pub unsafe fn instr16_0F01_2_mem(addr: i32) { lgdt(addr, 0xFFFFFF); }
  246. #[no_mangle]
  247. pub unsafe fn instr32_0F01_2_mem(addr: i32) { lgdt(addr, -1); }
  248. #[no_mangle]
  249. pub unsafe fn instr16_0F01_3_reg(r: i32) { trigger_ud(); }
  250. #[no_mangle]
  251. pub unsafe fn instr32_0F01_3_reg(r: i32) { trigger_ud(); }
  252. unsafe fn lidt(addr: i32, mask: i32) {
  253. if 0 != *cpl {
  254. trigger_gp(0);
  255. return;
  256. }
  257. let size = return_on_pagefault!(safe_read16(addr));
  258. let offset = return_on_pagefault!(safe_read32s(addr + 2));
  259. *idtr_size = size;
  260. *idtr_offset = offset & mask;
  261. }
  262. #[no_mangle]
  263. pub unsafe fn instr16_0F01_3_mem(addr: i32) { lidt(addr, 0xFFFFFF); }
  264. #[no_mangle]
  265. pub unsafe fn instr32_0F01_3_mem(addr: i32) { lidt(addr, -1); }
  266. #[no_mangle]
  267. pub unsafe fn instr16_0F01_4_reg(r: i32) {
  268. // smsw
  269. write_reg16(r, *cr);
  270. }
  271. #[no_mangle]
  272. pub unsafe fn instr32_0F01_4_reg(r: i32) { write_reg32(r, *cr); }
  273. #[no_mangle]
  274. pub unsafe fn instr16_0F01_4_mem(addr: i32) {
  275. return_on_pagefault!(safe_write16(addr, *cr));
  276. }
  277. #[no_mangle]
  278. pub unsafe fn instr32_0F01_4_mem(addr: i32) {
  279. return_on_pagefault!(safe_write16(addr, *cr));
  280. }
  281. #[no_mangle]
  282. pub unsafe fn lmsw(mut new_cr0: i32) {
  283. new_cr0 = *cr & !15 | new_cr0 & 15;
  284. if *protected_mode {
  285. // lmsw cannot be used to switch back
  286. new_cr0 |= CR0_PE
  287. }
  288. set_cr0(new_cr0);
  289. }
  290. #[no_mangle]
  291. pub unsafe fn instr16_0F01_6_reg(r: i32) {
  292. if 0 != *cpl {
  293. trigger_gp(0);
  294. return;
  295. }
  296. lmsw(read_reg16(r));
  297. }
  298. #[no_mangle]
  299. pub unsafe fn instr32_0F01_6_reg(r: i32) { instr16_0F01_6_reg(r); }
  300. #[no_mangle]
  301. pub unsafe fn instr16_0F01_6_mem(addr: i32) {
  302. if 0 != *cpl {
  303. trigger_gp(0);
  304. return;
  305. }
  306. lmsw(return_on_pagefault!(safe_read16(addr)));
  307. }
  308. #[no_mangle]
  309. pub unsafe fn instr32_0F01_6_mem(addr: i32) { instr16_0F01_6_mem(addr) }
  310. #[no_mangle]
  311. pub unsafe fn instr16_0F01_7_reg(r: i32) { trigger_ud(); }
  312. #[no_mangle]
  313. pub unsafe fn instr32_0F01_7_reg(r: i32) { trigger_ud(); }
  314. #[no_mangle]
  315. pub unsafe fn instr16_0F01_7_mem(addr: i32) {
  316. // invlpg
  317. if 0 != *cpl {
  318. trigger_gp(0);
  319. return;
  320. }
  321. invlpg(addr);
  322. }
  323. #[no_mangle]
  324. pub unsafe fn instr32_0F01_7_mem(addr: i32) { instr16_0F01_7_mem(addr) }
// 0F02: lar — load access rights. Only valid in protected mode (not vm86): #UD
// otherwise. The current destination value is passed into lar() alongside the
// 16-bit selector (see lar() for the flag/result semantics).
#[no_mangle]
pub unsafe fn instr16_0F02_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg16(
        r,
        lar(return_on_pagefault!(safe_read16(addr)), read_reg16(r)),
    );
}
#[no_mangle]
pub unsafe fn instr16_0F02_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg16(r, lar(read_reg16(r1), read_reg16(r)));
}
#[no_mangle]
pub unsafe fn instr32_0F02_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    // the selector operand is still only 16 bits wide with a 32-bit destination
    write_reg32(
        r,
        lar(return_on_pagefault!(safe_read16(addr)), read_reg32(r)),
    );
}
#[no_mangle]
pub unsafe fn instr32_0F02_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg32(r, lar(read_reg16(r1), read_reg32(r)));
}
// 0F03: lsl — load segment limit. Only valid in protected mode (not vm86): #UD
// otherwise. Mirrors the lar handlers above, delegating to lsl().
#[no_mangle]
pub unsafe fn instr16_0F03_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg16(
        r,
        lsl(return_on_pagefault!(safe_read16(addr)), read_reg16(r)),
    );
}
#[no_mangle]
pub unsafe fn instr16_0F03_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg16(r, lsl(read_reg16(r1), read_reg16(r)));
}
#[no_mangle]
pub unsafe fn instr32_0F03_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    // the selector operand is still only 16 bits wide with a 32-bit destination
    write_reg32(
        r,
        lsl(return_on_pagefault!(safe_read16(addr)), read_reg32(r)),
    );
}
#[no_mangle]
pub unsafe fn instr32_0F03_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg32(r, lsl(read_reg16(r1), read_reg32(r)));
}
// 0F04-0F0F: mostly invalid/unsupported opcodes in this range; the valid ones
// are clts (0F06), wbinvd (0F09) and ud2 (0F0B).
#[no_mangle]
pub unsafe fn instr_0F04() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F05() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F06() {
    // clts: clear the task-switched flag (cr0.TS). Privileged: #GP(0) unless cpl 0.
    if 0 != *cpl {
        dbg_log!("clts #gp");
        trigger_gp(0);
    }
    else {
        if false {
            // debug logging, disabled
            dbg_log!("clts");
        }
        *cr &= !CR0_TS;
    };
}
#[no_mangle]
pub unsafe fn instr_0F07() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F08() {
    // invd
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F09() {
    // wbinvd: privileged; otherwise a no-op here (no emulated cache to flush)
    if 0 != *cpl {
        dbg_log!("wbinvd #gp");
        trigger_gp(0);
    }
    else {
        // wbinvd
    };
}
#[no_mangle]
pub unsafe fn instr_0F0A() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F0B() {
    // UD2: defined to raise #UD
    trigger_ud();
}
#[no_mangle]
pub unsafe fn instr_0F0C() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F0D() {
    // nop (3DNow! prefetch on AMD) — treated as undefined here
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F0E() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F0F() { undefined_instruction(); }
  462. #[no_mangle]
  463. pub unsafe fn instr_0F10(source: reg128, r: i32) {
  464. // movups xmm, xmm/m128
  465. mov_rm_r128(source, r);
  466. }
  467. #[no_mangle]
  468. pub unsafe fn instr_0F10_reg(r1: i32, r2: i32) { instr_0F10(read_xmm128s(r1), r2); }
  469. #[no_mangle]
  470. pub unsafe fn instr_0F10_mem(addr: i32, r: i32) {
  471. instr_0F10(return_on_pagefault!(safe_read128s(addr)), r);
  472. }
  473. #[no_mangle]
  474. pub unsafe fn instr_F30F10_reg(r1: i32, r2: i32) {
  475. // movss xmm, xmm/m32
  476. let data = read_xmm128s(r1);
  477. let orig = read_xmm128s(r2);
  478. write_xmm128(
  479. r2,
  480. data.u32_0[0] as i32,
  481. orig.u32_0[1] as i32,
  482. orig.u32_0[2] as i32,
  483. orig.u32_0[3] as i32,
  484. );
  485. }
  486. #[no_mangle]
  487. pub unsafe fn instr_F30F10_mem(addr: i32, r: i32) {
  488. // movss xmm, xmm/m32
  489. let data = return_on_pagefault!(safe_read32s(addr));
  490. write_xmm128(r, data, 0, 0, 0);
  491. }
  492. #[no_mangle]
  493. pub unsafe fn instr_660F10(source: reg128, r: i32) {
  494. // movupd xmm, xmm/m128
  495. mov_rm_r128(source, r);
  496. }
  497. #[no_mangle]
  498. pub unsafe fn instr_660F10_reg(r1: i32, r2: i32) { instr_660F10(read_xmm128s(r1), r2); }
  499. #[no_mangle]
  500. pub unsafe fn instr_660F10_mem(addr: i32, r: i32) {
  501. instr_660F10(return_on_pagefault!(safe_read128s(addr)), r);
  502. }
  503. #[no_mangle]
  504. pub unsafe fn instr_F20F10_reg(r1: i32, r2: i32) {
  505. // movsd xmm, xmm/m64
  506. let data = read_xmm128s(r1);
  507. let orig = read_xmm128s(r2);
  508. write_xmm128(
  509. r2,
  510. data.u32_0[0] as i32,
  511. data.u32_0[1] as i32,
  512. orig.u32_0[2] as i32,
  513. orig.u32_0[3] as i32,
  514. );
  515. }
  516. #[no_mangle]
  517. pub unsafe fn instr_F20F10_mem(addr: i32, r: i32) {
  518. // movsd xmm, xmm/m64
  519. let data = return_on_pagefault!(safe_read64s(addr));
  520. write_xmm128_2(r, data, 0);
  521. }
  522. #[no_mangle]
  523. pub unsafe fn instr_0F11_reg(r1: i32, r2: i32) {
  524. // movups xmm/m128, xmm
  525. mov_r_r128(r1, r2);
  526. }
  527. #[no_mangle]
  528. pub unsafe fn instr_0F11_mem(addr: i32, r: i32) {
  529. // movups xmm/m128, xmm
  530. mov_r_m128(addr, r);
  531. }
  532. #[no_mangle]
  533. pub unsafe fn instr_F30F11_reg(rm_dest: i32, reg_src: i32) {
  534. // movss xmm/m32, xmm
  535. let data = read_xmm128s(reg_src);
  536. let orig = read_xmm128s(rm_dest);
  537. write_xmm128(
  538. rm_dest,
  539. data.u32_0[0] as i32,
  540. orig.u32_0[1] as i32,
  541. orig.u32_0[2] as i32,
  542. orig.u32_0[3] as i32,
  543. );
  544. }
  545. #[no_mangle]
  546. pub unsafe fn instr_F30F11_mem(addr: i32, r: i32) {
  547. // movss xmm/m32, xmm
  548. let data = read_xmm128s(r);
  549. return_on_pagefault!(safe_write32(addr, data.u32_0[0] as i32));
  550. }
  551. #[no_mangle]
  552. pub unsafe fn instr_660F11_reg(r1: i32, r2: i32) {
  553. // movupd xmm/m128, xmm
  554. mov_r_r128(r1, r2);
  555. }
  556. #[no_mangle]
  557. pub unsafe fn instr_660F11_mem(addr: i32, r: i32) {
  558. // movupd xmm/m128, xmm
  559. mov_r_m128(addr, r);
  560. }
  561. #[no_mangle]
  562. pub unsafe fn instr_F20F11_reg(r1: i32, r2: i32) {
  563. // movsd xmm/m64, xmm
  564. let data = read_xmm128s(r2);
  565. let orig = read_xmm128s(r1);
  566. write_xmm128(
  567. r1,
  568. data.u32_0[0] as i32,
  569. data.u32_0[1] as i32,
  570. orig.u32_0[2] as i32,
  571. orig.u32_0[3] as i32,
  572. );
  573. }
  574. #[no_mangle]
  575. pub unsafe fn instr_F20F11_mem(addr: i32, r: i32) {
  576. // movsd xmm/m64, xmm
  577. let data = read_xmm64s(r);
  578. return_on_pagefault!(safe_write64(addr, data));
  579. }
  580. #[no_mangle]
  581. pub unsafe fn instr_0F12_mem(addr: i32, r: i32) {
  582. // movlps xmm, m64
  583. let data = return_on_pagefault!(safe_read64s(addr));
  584. let orig = read_xmm128s(r);
  585. write_xmm128_2(r, data, orig.u64_0[1]);
  586. }
  587. #[no_mangle]
  588. pub unsafe fn instr_0F12_reg(r1: i32, r2: i32) {
  589. // movhlps xmm, xmm
  590. let data = read_xmm128s(r1);
  591. let orig = read_xmm128s(r2);
  592. write_xmm128(
  593. r2,
  594. data.u32_0[2] as i32,
  595. data.u32_0[3] as i32,
  596. orig.u32_0[2] as i32,
  597. orig.u32_0[3] as i32,
  598. );
  599. }
  600. #[no_mangle]
  601. pub unsafe fn instr_660F12_reg(r1: i32, r: i32) { trigger_ud(); }
  602. #[no_mangle]
  603. pub unsafe fn instr_660F12_mem(addr: i32, r: i32) {
  604. // movlpd xmm, m64
  605. let data = return_on_pagefault!(safe_read64s(addr));
  606. write_xmm64(r, data);
  607. }
  608. #[no_mangle]
  609. pub unsafe fn instr_F20F12_mem(addr: i32, r: i32) { unimplemented_sse(); }
  610. #[no_mangle]
  611. pub unsafe fn instr_F20F12_reg(r1: i32, r2: i32) { unimplemented_sse(); }
  612. #[no_mangle]
  613. pub unsafe fn instr_F30F12_mem(addr: i32, r: i32) { unimplemented_sse(); }
  614. #[no_mangle]
  615. pub unsafe fn instr_F30F12_reg(r1: i32, r2: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F13_mem(addr: i32, r: i32) {
    // movlps m64, xmm: store the low qword
    movl_r128_m64(addr, r);
}
// register forms of 0F13 are invalid encodings: #UD
#[no_mangle]
pub unsafe fn instr_0F13_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F13_reg(r1: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F13_mem(addr: i32, r: i32) {
    // movlpd m64, xmm: store the low qword
    movl_r128_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F14(source: u64, r: i32) {
    // unpcklps xmm, xmm/m128: interleave the two low dwords of destination
    // and source — result lanes are [dst0, src0, dst1, src1]
    // XXX: Aligned access or #gp
    let destination = read_xmm64s(r);
    write_xmm128(
        r,
        destination as i32,
        source as i32,
        (destination >> 32) as i32,
        (source >> 32) as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_0F14_reg(r1: i32, r2: i32) { instr_0F14(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F14_mem(addr: i32, r: i32) {
    instr_0F14(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F14(source: u64, r: i32) {
    // unpcklpd xmm, xmm/m128: result qwords are [dst_low, src_low]
    // XXX: Aligned access or #gp
    let destination = read_xmm64s(r);
    write_xmm128(
        r,
        destination as i32,
        (destination >> 32) as i32,
        source as i32,
        (source >> 32) as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F14_reg(r1: i32, r2: i32) { instr_660F14(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F14_mem(addr: i32, r: i32) {
    instr_660F14(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F15(source: reg128, r: i32) {
    // unpckhps xmm, xmm/m128: interleave the two high dwords of destination
    // and source — result lanes are [dst2, src2, dst3, src3]
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[2] as i32,
        source.u32_0[2] as i32,
        destination.u32_0[3] as i32,
        source.u32_0[3] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_0F15_reg(r1: i32, r2: i32) { instr_0F15(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F15_mem(addr: i32, r: i32) {
    instr_0F15(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F15(source: reg128, r: i32) {
    // unpckhpd xmm, xmm/m128: result qwords are [dst_high, src_high]
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[2] as i32,
        destination.u32_0[3] as i32,
        source.u32_0[2] as i32,
        source.u32_0[3] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F15_reg(r1: i32, r2: i32) { instr_660F15(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F15_mem(addr: i32, r: i32) {
    instr_660F15(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F16_mem(addr: i32, r: i32) {
    // movhps xmm, m64: load into the high qword
    movh_m64_r128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F16_reg(r1: i32, r2: i32) {
    // movlhps xmm, xmm: destination high qword := source low qword;
    // destination low qword unchanged
    let data = read_xmm128s(r1);
    let orig = read_xmm128s(r2);
    write_xmm128(
        r2,
        orig.u32_0[0] as i32,
        orig.u32_0[1] as i32,
        data.u32_0[0] as i32,
        data.u32_0[1] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F16_mem(addr: i32, r: i32) {
    // movhpd xmm, m64: load into the high qword
    movh_m64_r128(addr, r);
}
// movhpd with a register operand is an invalid encoding: #UD
#[no_mangle]
pub unsafe fn instr_660F16_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F17_mem(addr: i32, r: i32) {
    // movhps m64, xmm: store the high qword
    movh_r128_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F17_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F17_mem(addr: i32, r: i32) {
    // movhpd m64, xmm: store the high qword
    movh_r128_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_660F17_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F18_reg(r1: i32, r2: i32) {
    // reserved nop
}
#[no_mangle]
pub unsafe fn instr_0F18_mem(addr: i32, r: i32) {
    // prefetch
    // nop for us
}
#[no_mangle]
pub unsafe fn instr_0F1A() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F1B() { undefined_instruction(); }
// 0F1F: multi-byte nop; both forms ignore their operands
#[no_mangle]
pub unsafe fn instr_0F1F_reg(r1: i32, r2: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1F_mem(addr: i32, r: i32) {}
  762. #[no_mangle]
  763. pub unsafe fn instr_0F20(r: i32, creg: i32) {
  764. if 0 != *cpl {
  765. trigger_gp(0);
  766. }
  767. match creg {
  768. 0 => {
  769. write_reg32(r, *cr);
  770. },
  771. 2 => {
  772. write_reg32(r, *cr.offset(2));
  773. },
  774. 3 => {
  775. write_reg32(r, *cr.offset(3));
  776. },
  777. 4 => {
  778. write_reg32(r, *cr.offset(4));
  779. },
  780. _ => {
  781. dbg_log!("{}", creg);
  782. undefined_instruction();
  783. },
  784. }
  785. }
#[no_mangle]
pub unsafe fn instr_0F21(r: i32, mut dreg_index: i32) {
    // mov r32, drN: read a debug register. Privileged: #GP(0) unless cpl 0.
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    if dreg_index == 4 || dreg_index == 5 {
        // with cr4.DE set, accessing dr4/dr5 is undefined
        if 0 != *cr.offset(4) & CR4_DE {
            dbg_log!("#ud mov dreg 4/5 with cr4.DE set");
            trigger_ud();
            return;
        }
        else {
            // DR4 and DR5 refer to DR6 and DR7 respectively
            dreg_index += 2
        }
    }
    write_reg32(r, *dreg.offset(dreg_index as isize));
    if false {
        // debug logging, disabled
        dbg_log!(
            "read dr{}: {:x}",
            dreg_index,
            *dreg.offset(dreg_index as isize)
        );
    }
}
#[no_mangle]
pub unsafe fn instr_0F22(r: i32, creg: i32) {
    // mov crN, r32: write a control register. Privileged: #GP(0) unless cpl 0.
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let mut data: i32 = read_reg32(r);
    // mov cr, addr
    match creg {
        0 => {
            if false {
                dbg_log!("cr0 <- {:x}", data);
            }
            set_cr0(data);
        },
        2 => {
            // cr2 holds the page-fault linear address
            dbg_log!("cr2 <- {:x}", data);
            *cr.offset(2) = data
        },
        3 => {
            if false {
                dbg_log!("cr3 <- {:x}", data);
            }
            // mask out the flag bits of the page-directory base register
            data &= !0b111111100111;
            dbg_assert!(data & 0xFFF == 0, "TODO");
            *cr.offset(3) = data;
            // a cr3 write invalidates cached translations
            clear_tlb();
        },
        4 => {
            dbg_log!("cr4 <- {:x}", *cr.offset(4));
            // reserved/unsupported cr4 bits raise #GP(0)
            if 0 != data as u32
                & ((1 << 11 | 1 << 12 | 1 << 15 | 1 << 16 | 1 << 19) as u32 | 0xFFC00000)
            {
                dbg_log!("trigger_gp: Invalid cr4 bit");
                trigger_gp(0);
                return;
            }
            else {
                // toggling PGE or PSE flushes the TLB including global entries
                if 0 != (*cr.offset(4) ^ data) & (CR4_PGE | CR4_PSE) {
                    full_clear_tlb();
                }
                *cr.offset(4) = data;
                if 0 != *cr.offset(4) & CR4_PAE {
                    // PAE is not supported
                    dbg_assert!(false);
                }
            }
        },
        _ => {
            // cr1, cr5-cr7 do not exist: #UD
            dbg_log!("{}", creg);
            undefined_instruction();
        },
    }
}
#[no_mangle]
pub unsafe fn instr_0F23(r: i32, mut dreg_index: i32) {
    // mov drN, r32: write a debug register. Privileged: #GP(0) unless cpl 0.
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    if dreg_index == 4 || dreg_index == 5 {
        // with cr4.DE set, accessing dr4/dr5 is undefined
        if 0 != *cr.offset(4) & CR4_DE {
            dbg_log!("#ud mov dreg 4/5 with cr4.DE set");
            trigger_ud();
            return;
        }
        else {
            // DR4 and DR5 refer to DR6 and DR7 respectively
            dreg_index += 2
        }
    }
    *dreg.offset(dreg_index as isize) = read_reg32(r);
    if false {
        // debug logging, disabled
        dbg_log!(
            "write dr{}: {:x}",
            dreg_index,
            *dreg.offset(dreg_index as isize)
        );
    }
}
// 0F24-0F27: historically mov to/from test registers; treated as undefined here
#[no_mangle]
pub unsafe fn instr_0F24() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F25() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F26() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F27() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F28(source: reg128, r: i32) {
    // movaps xmm, xmm/m128
    // XXX: Aligned read or #gp
    mov_rm_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_0F28_reg(r1: i32, r2: i32) { instr_0F28(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F28_mem(addr: i32, r: i32) {
    instr_0F28(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F28(source: reg128, r: i32) {
    // movapd xmm, xmm/m128
    // XXX: Aligned read or #gp
    // Note: Same as movdqa (660F6F)
    mov_rm_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F28_reg(r1: i32, r2: i32) { instr_660F28(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F28_mem(addr: i32, r: i32) {
    instr_660F28(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F29_mem(addr: i32, r: i32) {
    // movaps m128, xmm
    let data = read_xmm128s(r);
    // XXX: Aligned write or #gp
    return_on_pagefault!(safe_write128(addr, data));
}
#[no_mangle]
pub unsafe fn instr_0F29_reg(r1: i32, r2: i32) {
    // movaps xmm, xmm
    mov_r_r128(r1, r2);
}
#[no_mangle]
pub unsafe fn instr_660F29_mem(addr: i32, r: i32) {
    // movapd m128, xmm
    let data = read_xmm128s(r);
    // XXX: Aligned write or #gp
    return_on_pagefault!(safe_write128(addr, data));
}
#[no_mangle]
pub unsafe fn instr_660F29_reg(r1: i32, r2: i32) {
    // movapd xmm, xmm
    mov_r_r128(r1, r2);
}
// 0F2B: movntps/movntpd non-temporal stores are memory-only encodings;
// register forms are #UD. Implemented as plain stores here.
#[no_mangle]
pub unsafe fn instr_0F2B_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F2B_mem(addr: i32, r: i32) {
    // movntps m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_660F2B_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F2B_mem(addr: i32, r: i32) {
    // movntpd m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F2C(source: u64, r: i32) {
    // cvttps2pi mm, xmm/m64: convert two packed f32 to two i32 with truncation
    let low = f32::from_bits(source as u32);
    let high = f32::from_bits((source >> 32) as u32);
    write_mmx_reg64(
        r,
        sse_convert_with_truncation_f32_to_i32(low) as u32 as u64
            | (sse_convert_with_truncation_f32_to_i32(high) as u32 as u64) << 32,
    );
    // the destination is an mmx register, so leave x87 mode
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F2C_mem(addr: i32, r: i32) {
    instr_0F2C(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F2C_reg(r1: i32, r2: i32) { instr_0F2C(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F2C(source: reg128, r: i32) {
    // cvttpd2pi mm, xmm/m128: convert two packed f64 to two i32 with truncation
    write_mmx_reg64(
        r,
        sse_convert_with_truncation_f64_to_i32(source.f64_0[0]) as u32 as u64
            | (sse_convert_with_truncation_f64_to_i32(source.f64_0[1]) as u32 as u64) << 32,
    );
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660F2C_mem(addr: i32, r: i32) {
    instr_660F2C(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F2C_reg(r1: i32, r2: i32) { instr_660F2C(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F20F2C(source: u64, r: i32) {
    // cvttsd2si r32, xmm/m64: scalar f64 -> i32 with truncation
    let source = f64::from_bits(source);
    write_reg32(r, sse_convert_with_truncation_f64_to_i32(source));
}
#[no_mangle]
pub unsafe fn instr_F20F2C_reg(r1: i32, r2: i32) { instr_F20F2C(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F20F2C_mem(addr: i32, r: i32) {
    instr_F20F2C(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F2C(source: f32, r: i32) {
    // cvttss2si r32, xmm/m32: scalar f32 -> i32 with truncation
    write_reg32(r, sse_convert_with_truncation_f32_to_i32(source));
}
#[no_mangle]
pub unsafe fn instr_F30F2C_mem(addr: i32, r: i32) {
    instr_F30F2C(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
}
#[no_mangle]
pub unsafe fn instr_F30F2C_reg(r1: i32, r2: i32) { instr_F30F2C(read_xmm_f32(r1), r2); }
// Flag mapping used by the compare handlers below:
// equal -> ZF; less -> CF; greater -> no flags; unordered -> ZF|PF|CF.
pub unsafe fn instr_0F2E(source: f32, r: i32) {
    // ucomiss xmm1, xmm2/m32
    let destination = read_xmm_f32(r);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // unordered: at least one operand is NaN
        // TODO: Signal on SNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
#[no_mangle]
pub unsafe fn instr_0F2E_reg(r1: i32, r2: i32) { instr_0F2E(read_xmm_f32(r1), r2) }
#[no_mangle]
pub unsafe fn instr_0F2E_mem(addr: i32, r: i32) {
    instr_0F2E(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
}
// Same flag mapping as instr_0F2E: equal -> ZF; less -> CF; greater -> none;
// unordered -> ZF|PF|CF.
pub unsafe fn instr_660F2E(source: u64, r: i32) {
    // ucomisd xmm1, xmm2/m64
    let destination = f64::from_bits(read_xmm64s(r));
    let source = f64::from_bits(source);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // unordered: at least one operand is NaN
        // TODO: Signal on SNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
#[no_mangle]
pub unsafe fn instr_660F2E_reg(r1: i32, r: i32) { instr_660F2E(read_xmm64s(r1), r); }
#[no_mangle]
pub unsafe fn instr_660F2E_mem(addr: i32, r: i32) {
    instr_660F2E(return_on_pagefault!(safe_read64s(addr)), r)
}
// comiss: same flag mapping as ucomiss (equal -> ZF; less -> CF; greater ->
// none; unordered -> ZF|PF|CF); differs only in the intended NaN signaling.
pub unsafe fn instr_0F2F(source: f32, r: i32) {
    // comiss xmm1, xmm2/m32
    let destination = read_xmm_f32(r);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // unordered: at least one operand is NaN
        // TODO: Signal on SNaN or QNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
#[no_mangle]
pub unsafe fn instr_0F2F_reg(r1: i32, r2: i32) { instr_0F2F(read_xmm_f32(r1), r2) }
#[no_mangle]
pub unsafe fn instr_0F2F_mem(addr: i32, r: i32) {
    instr_0F2F(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
}
// comisd: same flag mapping as ucomisd; differs only in the intended NaN
// signaling behavior.
pub unsafe fn instr_660F2F(source: u64, r: i32) {
    // comisd xmm1, xmm2/m64
    let destination = f64::from_bits(read_xmm64s(r));
    let source = f64::from_bits(source);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // unordered: at least one operand is NaN
        // TODO: Signal on SNaN or QNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
#[no_mangle]
pub unsafe fn instr_660F2F_reg(r1: i32, r: i32) { instr_660F2F(read_xmm64s(r1), r); }
#[no_mangle]
pub unsafe fn instr_660F2F_mem(addr: i32, r: i32) {
    instr_660F2F(return_on_pagefault!(safe_read64s(addr)), r)
}
#[no_mangle]
pub unsafe fn instr_0F30() {
    // wrmsr - write machine specific register (edx:eax -> MSR[ecx]).
    // Privileged: #GP(0) unless cpl 0.
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let index = read_reg32(ECX);
    let low = read_reg32(EAX);
    let high = read_reg32(EDX);
    // skip logging for IA32_SYSENTER_ESP — presumably written too frequently
    if index != IA32_SYSENTER_ESP {
        dbg_log!("wrmsr ecx={:x} data={:x}:{:x}", index, high, low);
    }
    if index == IA32_SYSENTER_CS {
        *sysenter_cs = low & 0xFFFF
    }
    else if index == IA32_SYSENTER_EIP {
        *sysenter_eip = low
    }
    else if index == IA32_SYSENTER_ESP {
        *sysenter_esp = low
    }
    else if index == MSR_IA32_FEAT_CTL {
        // linux 5.x
    }
    else if index == MSR_TEST_CTRL {
        // linux 5.x
    }
    else if index == IA32_APIC_BASE_MSR {
        // only the enable bit may change; relocating the APIC is unsupported
        dbg_assert!(
            high == 0,
            ("Changing APIC address (high 32 bits) not supported")
        );
        let address = low & !(IA32_APIC_BASE_BSP | IA32_APIC_BASE_EXTD | IA32_APIC_BASE_EN);
        dbg_assert!(
            address == APIC_ADDRESS,
            ("Changing APIC address not supported")
        );
        dbg_assert!(low & IA32_APIC_BASE_EXTD == 0, "x2apic not supported");
        apic_enabled = low & IA32_APIC_BASE_EN == IA32_APIC_BASE_EN
    }
    else if index == IA32_TIME_STAMP_COUNTER {
        set_tsc(low as u32, high as u32);
    }
    else if index == IA32_BIOS_SIGN_ID {
        //
    }
    else if index == MSR_MISC_FEATURE_ENABLES {
        // Linux 4, see: https://patchwork.kernel.org/patch/9528279/
    }
    else if index == IA32_MISC_ENABLE {
        // Enable Misc. Processor Features
    }
    else if index == IA32_MCG_CAP {
        // netbsd
    }
    else if index == IA32_KERNEL_GS_BASE {
        // Only used in 64 bit mode (by SWAPGS), but set by kvm-unit-test
        dbg_log!("GS Base written");
    }
    else {
        // unknown MSRs are ignored in release builds (write dropped)
        dbg_log!("Unknown msr: {:x}", index);
        dbg_assert!(false);
    }
}
#[no_mangle]
pub unsafe fn instr_0F31() {
    // rdtsc - read timestamp counter into edx:eax.
    // Allowed at cpl 0, or at any cpl when cr4.TSD is clear; otherwise #GP(0).
    if 0 == *cpl || 0 == *cr.offset(4) & CR4_TSD {
        let tsc = read_tsc();
        write_reg32(EAX, tsc as i32);
        write_reg32(EDX, (tsc >> 32) as i32);
        if false {
            // debug logging, disabled
            dbg_log!("rdtsc edx:eax={:x}:{:x}", read_reg32(EDX), read_reg32(EAX));
        }
    }
    else {
        trigger_gp(0);
    };
}
#[no_mangle]
pub unsafe fn instr_0F32() {
    // rdmsr - read machine specific register (MSR[ecx] -> edx:eax).
    // Privileged: #GP(0) unless cpl 0. Unimplemented-but-known MSRs read as 0.
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let index = read_reg32(ECX);
    dbg_log!("rdmsr ecx={:x}", index);
    let mut low: i32 = 0;
    let mut high: i32 = 0;
    if index == IA32_SYSENTER_CS {
        low = *sysenter_cs
    }
    else if index == IA32_SYSENTER_EIP {
        low = *sysenter_eip
    }
    else if index == IA32_SYSENTER_ESP {
        low = *sysenter_esp
    }
    else if index == IA32_TIME_STAMP_COUNTER {
        let tsc = read_tsc();
        low = tsc as i32;
        high = (tsc >> 32) as i32
    }
    else if index == MSR_IA32_FEAT_CTL {
        // linux 5.x
    }
    else if index == MSR_TEST_CTRL {
        // linux 5.x
    }
    else if index == IA32_PLATFORM_ID {
    }
    else if index == IA32_APIC_BASE_MSR {
        if ::config::ENABLE_ACPI {
            low = APIC_ADDRESS;
            if apic_enabled {
                low |= IA32_APIC_BASE_EN
            }
        }
    }
    else if index == IA32_BIOS_SIGN_ID {
    }
    else if index == MSR_PLATFORM_INFO {
        low = 1 << 8
    }
    else if index == MSR_MISC_FEATURE_ENABLES {
    }
    else if index == IA32_MISC_ENABLE {
        // Enable Misc. Processor Features
        low = 1 << 0; // fast string
    }
    else if index == IA32_RTIT_CTL {
        // linux4
    }
    else if index == MSR_SMI_COUNT {
    }
    else if index == IA32_MCG_CAP {
        // netbsd
    }
    else if !(index == MSR_PKG_C2_RESIDENCY) {
        // any other MSR is unexpected; reads as 0 in release builds
        dbg_log!("Unknown msr: {:x}", index);
        dbg_assert!(false);
    }
    write_reg32(EAX, low);
    write_reg32(EDX, high);
}
#[no_mangle]
pub unsafe fn instr_0F33() {
    // rdpmc
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F34() {
    // sysenter: fast system-call entry to ring 0.
    // #GP(0) in real mode or when IA32_SYSENTER_CS (minus RPL bits) is 0.
    let seg = *sysenter_cs & 0xFFFC;
    if !*protected_mode || seg == 0 {
        trigger_gp(0);
        return;
    }
    else {
        // clear VM and IF, then jump to the configured entry point
        *flags &= !FLAG_VM & !FLAG_INTERRUPT;
        *instruction_pointer = *sysenter_eip;
        write_reg32(ESP, *sysenter_esp);
        // cs := sysenter_cs as a flat 4 GiB 32-bit code segment
        *sreg.offset(CS as isize) = seg as u16;
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = -1i32 as u32;
        *segment_offsets.offset(CS as isize) = 0;
        update_cs_size(true);
        *cpl = 0;
        cpl_changed();
        // ss := sysenter_cs + 8 as a flat 32-bit stack segment
        *sreg.offset(SS as isize) = (seg + 8) as u16;
        *segment_is_null.offset(SS as isize) = false;
        *segment_limits.offset(SS as isize) = -1i32 as u32;
        *segment_offsets.offset(SS as isize) = 0;
        *stack_size_32 = true;
        return;
    };
}
#[no_mangle]
pub unsafe fn instr_0F35() {
    // sysexit: fast system-call return to ring 3.
    // #GP(0) in real mode, outside ring 0, or with IA32_SYSENTER_CS unset.
    let seg = *sysenter_cs & 0xFFFC;
    if !*protected_mode || 0 != *cpl || seg == 0 {
        trigger_gp(0);
        return;
    }
    else {
        // return address in edx, user stack pointer in ecx
        *instruction_pointer = read_reg32(EDX);
        write_reg32(ESP, read_reg32(ECX));
        // cs := sysenter_cs + 16, RPL 3, flat 32-bit code segment
        *sreg.offset(CS as isize) = (seg + 16 | 3) as u16;
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = -1i32 as u32;
        *segment_offsets.offset(CS as isize) = 0;
        update_cs_size(true);
        *cpl = 3;
        cpl_changed();
        // ss := sysenter_cs + 24, RPL 3, flat 32-bit stack segment
        *sreg.offset(SS as isize) = (seg + 24 | 3) as u16;
        *segment_is_null.offset(SS as isize) = false;
        *segment_limits.offset(SS as isize) = -1i32 as u32;
        *segment_offsets.offset(SS as isize) = 0;
        *stack_size_32 = true;
        return;
    };
}
#[no_mangle]
pub unsafe fn instr_0F36() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F37() {
    // getsec
    undefined_instruction();
}
// 0F38-0F3F: escape/reserved opcode bytes; not implemented here
#[no_mangle]
pub unsafe fn instr_0F38() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F39() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3A() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3B() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3C() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3D() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3E() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3F() { unimplemented_sse(); }
// 0F40..0F4F: cmovcc r, r/m — conditionally copy the source operand into the
// destination register. The r/m operand is read unconditionally, so a memory
// source can still page-fault even when the condition is false.
// cmovo: move if OF=1
pub unsafe fn instr16_0F40_mem(addr: i32, r: i32) {
    cmovcc16(test_o(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F40_reg(r1: i32, r: i32) { cmovcc16(test_o(), read_reg16(r1), r); }
pub unsafe fn instr32_0F40_mem(addr: i32, r: i32) {
    cmovcc32(test_o(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F40_reg(r1: i32, r: i32) { cmovcc32(test_o(), read_reg32(r1), r); }
// cmovno: move if OF=0
pub unsafe fn instr16_0F41_mem(addr: i32, r: i32) {
    cmovcc16(!test_o(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F41_reg(r1: i32, r: i32) { cmovcc16(!test_o(), read_reg16(r1), r); }
pub unsafe fn instr32_0F41_mem(addr: i32, r: i32) {
    cmovcc32(!test_o(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F41_reg(r1: i32, r: i32) { cmovcc32(!test_o(), read_reg32(r1), r); }
// cmovb/cmovc: move if CF=1
pub unsafe fn instr16_0F42_mem(addr: i32, r: i32) {
    cmovcc16(test_b(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F42_reg(r1: i32, r: i32) { cmovcc16(test_b(), read_reg16(r1), r); }
pub unsafe fn instr32_0F42_mem(addr: i32, r: i32) {
    cmovcc32(test_b(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F42_reg(r1: i32, r: i32) { cmovcc32(test_b(), read_reg32(r1), r); }
// cmovnb/cmovae: move if CF=0
pub unsafe fn instr16_0F43_mem(addr: i32, r: i32) {
    cmovcc16(!test_b(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F43_reg(r1: i32, r: i32) { cmovcc16(!test_b(), read_reg16(r1), r); }
pub unsafe fn instr32_0F43_mem(addr: i32, r: i32) {
    cmovcc32(!test_b(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F43_reg(r1: i32, r: i32) { cmovcc32(!test_b(), read_reg32(r1), r); }
// cmove/cmovz: move if ZF=1
pub unsafe fn instr16_0F44_mem(addr: i32, r: i32) {
    cmovcc16(test_z(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F44_reg(r1: i32, r: i32) { cmovcc16(test_z(), read_reg16(r1), r); }
  1387. pub unsafe fn instr32_0F44_mem(addr: i32, r: i32) {
  1388. cmovcc32(test_z(), return_on_pagefault!(safe_read32s(addr)), r);
  1389. }
  1390. pub unsafe fn instr32_0F44_reg(r1: i32, r: i32) { cmovcc32(test_z(), read_reg32(r1), r); }
  1391. pub unsafe fn instr16_0F45_mem(addr: i32, r: i32) {
  1392. cmovcc16(!test_z(), return_on_pagefault!(safe_read16(addr)), r);
  1393. }
  1394. pub unsafe fn instr16_0F45_reg(r1: i32, r: i32) { cmovcc16(!test_z(), read_reg16(r1), r); }
  1395. pub unsafe fn instr32_0F45_mem(addr: i32, r: i32) {
  1396. cmovcc32(!test_z(), return_on_pagefault!(safe_read32s(addr)), r);
  1397. }
  1398. pub unsafe fn instr32_0F45_reg(r1: i32, r: i32) { cmovcc32(!test_z(), read_reg32(r1), r); }
  1399. pub unsafe fn instr16_0F46_mem(addr: i32, r: i32) {
  1400. cmovcc16(test_be(), return_on_pagefault!(safe_read16(addr)), r);
  1401. }
  1402. pub unsafe fn instr16_0F46_reg(r1: i32, r: i32) { cmovcc16(test_be(), read_reg16(r1), r); }
  1403. pub unsafe fn instr32_0F46_mem(addr: i32, r: i32) {
  1404. cmovcc32(test_be(), return_on_pagefault!(safe_read32s(addr)), r);
  1405. }
  1406. pub unsafe fn instr32_0F46_reg(r1: i32, r: i32) { cmovcc32(test_be(), read_reg32(r1), r); }
  1407. pub unsafe fn instr16_0F47_mem(addr: i32, r: i32) {
  1408. cmovcc16(!test_be(), return_on_pagefault!(safe_read16(addr)), r);
  1409. }
  1410. pub unsafe fn instr16_0F47_reg(r1: i32, r: i32) { cmovcc16(!test_be(), read_reg16(r1), r); }
  1411. pub unsafe fn instr32_0F47_mem(addr: i32, r: i32) {
  1412. cmovcc32(!test_be(), return_on_pagefault!(safe_read32s(addr)), r);
  1413. }
  1414. pub unsafe fn instr32_0F47_reg(r1: i32, r: i32) { cmovcc32(!test_be(), read_reg32(r1), r); }
  1415. pub unsafe fn instr16_0F48_mem(addr: i32, r: i32) {
  1416. cmovcc16(test_s(), return_on_pagefault!(safe_read16(addr)), r);
  1417. }
  1418. pub unsafe fn instr16_0F48_reg(r1: i32, r: i32) { cmovcc16(test_s(), read_reg16(r1), r); }
  1419. pub unsafe fn instr32_0F48_mem(addr: i32, r: i32) {
  1420. cmovcc32(test_s(), return_on_pagefault!(safe_read32s(addr)), r);
  1421. }
  1422. pub unsafe fn instr32_0F48_reg(r1: i32, r: i32) { cmovcc32(test_s(), read_reg32(r1), r); }
  1423. pub unsafe fn instr16_0F49_mem(addr: i32, r: i32) {
  1424. cmovcc16(!test_s(), return_on_pagefault!(safe_read16(addr)), r);
  1425. }
  1426. pub unsafe fn instr16_0F49_reg(r1: i32, r: i32) { cmovcc16(!test_s(), read_reg16(r1), r); }
  1427. pub unsafe fn instr32_0F49_mem(addr: i32, r: i32) {
  1428. cmovcc32(!test_s(), return_on_pagefault!(safe_read32s(addr)), r);
  1429. }
  1430. pub unsafe fn instr32_0F49_reg(r1: i32, r: i32) { cmovcc32(!test_s(), read_reg32(r1), r); }
  1431. pub unsafe fn instr16_0F4A_mem(addr: i32, r: i32) {
  1432. cmovcc16(test_p(), return_on_pagefault!(safe_read16(addr)), r);
  1433. }
  1434. pub unsafe fn instr16_0F4A_reg(r1: i32, r: i32) { cmovcc16(test_p(), read_reg16(r1), r); }
  1435. pub unsafe fn instr32_0F4A_mem(addr: i32, r: i32) {
  1436. cmovcc32(test_p(), return_on_pagefault!(safe_read32s(addr)), r);
  1437. }
  1438. pub unsafe fn instr32_0F4A_reg(r1: i32, r: i32) { cmovcc32(test_p(), read_reg32(r1), r); }
  1439. pub unsafe fn instr16_0F4B_mem(addr: i32, r: i32) {
  1440. cmovcc16(!test_p(), return_on_pagefault!(safe_read16(addr)), r);
  1441. }
  1442. pub unsafe fn instr16_0F4B_reg(r1: i32, r: i32) { cmovcc16(!test_p(), read_reg16(r1), r); }
  1443. pub unsafe fn instr32_0F4B_mem(addr: i32, r: i32) {
  1444. cmovcc32(!test_p(), return_on_pagefault!(safe_read32s(addr)), r);
  1445. }
  1446. pub unsafe fn instr32_0F4B_reg(r1: i32, r: i32) { cmovcc32(!test_p(), read_reg32(r1), r); }
  1447. pub unsafe fn instr16_0F4C_mem(addr: i32, r: i32) {
  1448. cmovcc16(test_l(), return_on_pagefault!(safe_read16(addr)), r);
  1449. }
  1450. pub unsafe fn instr16_0F4C_reg(r1: i32, r: i32) { cmovcc16(test_l(), read_reg16(r1), r); }
  1451. pub unsafe fn instr32_0F4C_mem(addr: i32, r: i32) {
  1452. cmovcc32(test_l(), return_on_pagefault!(safe_read32s(addr)), r);
  1453. }
  1454. pub unsafe fn instr32_0F4C_reg(r1: i32, r: i32) { cmovcc32(test_l(), read_reg32(r1), r); }
  1455. pub unsafe fn instr16_0F4D_mem(addr: i32, r: i32) {
  1456. cmovcc16(!test_l(), return_on_pagefault!(safe_read16(addr)), r);
  1457. }
  1458. pub unsafe fn instr16_0F4D_reg(r1: i32, r: i32) { cmovcc16(!test_l(), read_reg16(r1), r); }
  1459. pub unsafe fn instr32_0F4D_mem(addr: i32, r: i32) {
  1460. cmovcc32(!test_l(), return_on_pagefault!(safe_read32s(addr)), r);
  1461. }
  1462. pub unsafe fn instr32_0F4D_reg(r1: i32, r: i32) { cmovcc32(!test_l(), read_reg32(r1), r); }
  1463. pub unsafe fn instr16_0F4E_mem(addr: i32, r: i32) {
  1464. cmovcc16(test_le(), return_on_pagefault!(safe_read16(addr)), r);
  1465. }
  1466. pub unsafe fn instr16_0F4E_reg(r1: i32, r: i32) { cmovcc16(test_le(), read_reg16(r1), r); }
  1467. pub unsafe fn instr32_0F4E_mem(addr: i32, r: i32) {
  1468. cmovcc32(test_le(), return_on_pagefault!(safe_read32s(addr)), r);
  1469. }
  1470. pub unsafe fn instr32_0F4E_reg(r1: i32, r: i32) { cmovcc32(test_le(), read_reg32(r1), r); }
  1471. pub unsafe fn instr16_0F4F_mem(addr: i32, r: i32) {
  1472. cmovcc16(!test_le(), return_on_pagefault!(safe_read16(addr)), r);
  1473. }
  1474. pub unsafe fn instr16_0F4F_reg(r1: i32, r: i32) { cmovcc16(!test_le(), read_reg16(r1), r); }
  1475. pub unsafe fn instr32_0F4F_mem(addr: i32, r: i32) {
  1476. cmovcc32(!test_le(), return_on_pagefault!(safe_read32s(addr)), r);
  1477. }
  1478. pub unsafe fn instr32_0F4F_reg(r1: i32, r: i32) { cmovcc32(!test_le(), read_reg32(r1), r); }
  1479. #[no_mangle]
  1480. pub unsafe fn instr_0F50_reg(r1: i32, r2: i32) {
  1481. // movmskps r, xmm
  1482. let source = read_xmm128s(r1);
  1483. let data = (source.u32_0[0] >> 31
  1484. | source.u32_0[1] >> 31 << 1
  1485. | source.u32_0[2] >> 31 << 2
  1486. | source.u32_0[3] >> 31 << 3) as i32;
  1487. write_reg32(r2, data);
  1488. }
  1489. #[no_mangle]
  1490. pub unsafe fn instr_0F50_mem(addr: i32, r1: i32) { trigger_ud(); }
  1491. #[no_mangle]
  1492. pub unsafe fn instr_660F50_reg(r1: i32, r2: i32) {
  1493. // movmskpd r, xmm
  1494. let source = read_xmm128s(r1);
  1495. let data = (source.u32_0[1] >> 31 | source.u32_0[3] >> 31 << 1) as i32;
  1496. write_reg32(r2, data);
  1497. }
  1498. #[no_mangle]
  1499. pub unsafe fn instr_660F50_mem(addr: i32, r1: i32) { trigger_ud(); }
// 0F54..0F57: packed bitwise logic. The ps (no prefix) and pd (66 prefix)
// variants are bit-identical operations, so both forward to the shared
// integer helpers (pand/pandn/por/pxor).
#[no_mangle]
pub unsafe fn instr_0F54(source: reg128, r: i32) {
    // andps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_0F54_reg(r1: i32, r2: i32) { instr_0F54(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F54_mem(addr: i32, r: i32) {
    instr_0F54(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F54(source: reg128, r: i32) {
    // andpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F54_reg(r1: i32, r2: i32) { instr_660F54(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F54_mem(addr: i32, r: i32) {
    instr_660F54(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F55(source: reg128, r: i32) {
    // andnps xmm, xmm/mem128 (dest = !dest & source)
    // XXX: Aligned access or #gp
    pandn_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_0F55_reg(r1: i32, r2: i32) { instr_0F55(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F55_mem(addr: i32, r: i32) {
    instr_0F55(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F55(source: reg128, r: i32) {
    // andnpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pandn_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F55_reg(r1: i32, r2: i32) { instr_660F55(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F55_mem(addr: i32, r: i32) {
    instr_660F55(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F56(source: reg128, r: i32) {
    // orps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    por_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_0F56_reg(r1: i32, r2: i32) { instr_0F56(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F56_mem(addr: i32, r: i32) {
    instr_0F56(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F56(source: reg128, r: i32) {
    // orpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    por_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F56_reg(r1: i32, r2: i32) { instr_660F56(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F56_mem(addr: i32, r: i32) {
    instr_660F56(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F57(source: reg128, r: i32) {
    // xorps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pxor_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_0F57_reg(r1: i32, r2: i32) { instr_0F57(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F57_mem(addr: i32, r: i32) {
    instr_0F57(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F57(source: reg128, r: i32) {
    // xorpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pxor_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F57_reg(r1: i32, r2: i32) { instr_660F57(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F57_mem(addr: i32, r: i32) {
    instr_660F57(return_on_pagefault!(safe_read128s(addr)), r);
}
  1596. #[no_mangle]
  1597. pub unsafe fn instr_0F60(source: i32, r: i32) {
  1598. // punpcklbw mm, mm/m32
  1599. let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
  1600. let source: [u8; 4] = std::mem::transmute(source);
  1601. let mut result = [0; 8];
  1602. for i in 0..4 {
  1603. result[2 * i + 0] = destination[i];
  1604. result[2 * i + 1] = source[i];
  1605. }
  1606. write_mmx_reg64(r, std::mem::transmute(result));
  1607. transition_fpu_to_mmx();
  1608. }
  1609. #[no_mangle]
  1610. pub unsafe fn instr_0F60_reg(r1: i32, r2: i32) { instr_0F60(read_mmx32s(r1), r2); }
  1611. #[no_mangle]
  1612. pub unsafe fn instr_0F60_mem(addr: i32, r: i32) {
  1613. instr_0F60(return_on_pagefault!(safe_read32s(addr)), r);
  1614. }
  1615. #[no_mangle]
  1616. pub unsafe fn instr_660F60(source: reg128, r: i32) {
  1617. // punpcklbw xmm, xmm/m128
  1618. // XXX: Aligned access or #gp
  1619. let destination: [u8; 8] = std::mem::transmute(read_xmm64s(r));
  1620. let mut result = reg128 { i8_0: [0; 16] };
  1621. for i in 0..8 {
  1622. result.u8_0[2 * i + 0] = destination[i];
  1623. result.u8_0[2 * i + 1] = source.u8_0[i];
  1624. }
  1625. write_xmm_reg128(r, result);
  1626. }
  1627. #[no_mangle]
  1628. pub unsafe fn instr_660F60_reg(r1: i32, r2: i32) { instr_660F60(read_xmm128s(r1), r2); }
  1629. #[no_mangle]
  1630. pub unsafe fn instr_660F60_mem(addr: i32, r: i32) {
  1631. instr_660F60(return_on_pagefault!(safe_read128s(addr)), r);
  1632. }
  1633. #[no_mangle]
  1634. pub unsafe fn instr_0F61(source: i32, r: i32) {
  1635. // punpcklwd mm, mm/m32
  1636. let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
  1637. let source: [u16; 2] = std::mem::transmute(source);
  1638. let mut result = [0; 4];
  1639. for i in 0..2 {
  1640. result[2 * i + 0] = destination[i];
  1641. result[2 * i + 1] = source[i];
  1642. }
  1643. write_mmx_reg64(r, std::mem::transmute(result));
  1644. transition_fpu_to_mmx();
  1645. }
  1646. #[no_mangle]
  1647. pub unsafe fn instr_0F61_reg(r1: i32, r2: i32) { instr_0F61(read_mmx32s(r1), r2); }
  1648. #[no_mangle]
  1649. pub unsafe fn instr_0F61_mem(addr: i32, r: i32) {
  1650. instr_0F61(return_on_pagefault!(safe_read32s(addr)), r);
  1651. }
#[no_mangle]
pub unsafe fn instr_660F61(source: reg128, r: i32) {
    // punpcklwd xmm, xmm/m128
    // Interleave the four low words of destination and source
    // (destination word first in each pair).
    // XXX: Aligned access or #gp
    let destination: [u16; 4] = std::mem::transmute(read_xmm64s(r));
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..4 {
        result.u16_0[2 * i + 0] = destination[i];
        result.u16_0[2 * i + 1] = source.u16_0[i];
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F61_reg(r1: i32, r2: i32) { instr_660F61(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F61_mem(addr: i32, r: i32) {
    instr_660F61(return_on_pagefault!(safe_read128s(addr)), r);
}
  1670. #[no_mangle]
  1671. pub unsafe fn instr_0F62(source: i32, r: i32) {
  1672. // punpckldq mm, mm/m32
  1673. let destination = read_mmx64s(r);
  1674. write_mmx_reg64(
  1675. r,
  1676. (destination & 0xFFFF_FFFF) | (source as u32 as u64) << 32,
  1677. );
  1678. transition_fpu_to_mmx();
  1679. }
  1680. #[no_mangle]
  1681. pub unsafe fn instr_0F62_reg(r1: i32, r2: i32) { instr_0F62(read_mmx32s(r1), r2); }
  1682. #[no_mangle]
  1683. pub unsafe fn instr_0F62_mem(addr: i32, r: i32) {
  1684. instr_0F62(return_on_pagefault!(safe_read32s(addr)), r);
  1685. }
#[no_mangle]
pub unsafe fn instr_660F62(source: reg128, r: i32) {
    // punpckldq xmm, xmm/m128
    // Interleave the two low dwords: result = [dst0, src0, dst1, src1].
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[0] as i32,
        source.u32_0[0] as i32,
        destination.u32_0[1] as i32,
        source.u32_0[1] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F62_reg(r1: i32, r2: i32) { instr_660F62(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F62_mem(addr: i32, r: i32) {
    instr_660F62(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F63(source: u64, r: i32) {
    // packsswb mm, mm/m64
    // Pack eight signed words (four from dest, four from source) into
    // eight signed-saturated bytes; destination words fill the low half.
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..4 {
        // `as i32` zero-extends the u16 lane; saturate_sw_to_sb presumably
        // treats only the low 16 bits as a signed word — confirm at its definition.
        result[i + 0] = saturate_sw_to_sb(destination[i] as i32);
        result[i + 4] = saturate_sw_to_sb(source[i] as i32);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F63_reg(r1: i32, r2: i32) { instr_0F63(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F63_mem(addr: i32, r: i32) {
    instr_0F63(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F63(source: reg128, r: i32) {
    // packsswb xmm, xmm/m128
    // Pack sixteen signed words into sixteen signed-saturated bytes;
    // the eight destination words fill the low half of the result.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u8_0[i + 0] = saturate_sw_to_sb(destination.u16_0[i] as i32);
        result.u8_0[i + 8] = saturate_sw_to_sb(source.u16_0[i] as i32);
    }
    write_xmm_reg128(r, result)
}
#[no_mangle]
pub unsafe fn instr_660F63_reg(r1: i32, r2: i32) { instr_660F63(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F63_mem(addr: i32, r: i32) {
    instr_660F63(return_on_pagefault!(safe_read128s(addr)), r);
}
  1742. #[no_mangle]
  1743. pub unsafe fn instr_0F64(source: u64, r: i32) {
  1744. // pcmpgtb mm, mm/m64
  1745. let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
  1746. let source: [i8; 8] = std::mem::transmute(source);
  1747. let mut result: [u8; 8] = [0; 8];
  1748. for i in 0..8 {
  1749. result[i] = if destination[i] > source[i] { 255 } else { 0 };
  1750. }
  1751. write_mmx_reg64(r, std::mem::transmute(result));
  1752. transition_fpu_to_mmx();
  1753. }
  1754. #[no_mangle]
  1755. pub unsafe fn instr_0F64_reg(r1: i32, r2: i32) { instr_0F64(read_mmx64s(r1), r2); }
  1756. #[no_mangle]
  1757. pub unsafe fn instr_0F64_mem(addr: i32, r: i32) {
  1758. instr_0F64(return_on_pagefault!(safe_read64s(addr)), r);
  1759. }
  1760. #[no_mangle]
  1761. pub unsafe fn instr_660F64(source: reg128, r: i32) {
  1762. // pcmpgtb xmm, xmm/m128
  1763. // XXX: Aligned access or #gp
  1764. let destination = read_xmm128s(r);
  1765. let mut result = reg128 { i8_0: [0; 16] };
  1766. for i in 0..16 {
  1767. result.u8_0[i] = if destination.i8_0[i] as i32 > source.i8_0[i] as i32 { 255 } else { 0 };
  1768. }
  1769. write_xmm_reg128(r, result);
  1770. }
  1771. #[no_mangle]
  1772. pub unsafe fn instr_660F64_reg(r1: i32, r2: i32) { instr_660F64(read_xmm128s(r1), r2); }
  1773. #[no_mangle]
  1774. pub unsafe fn instr_660F64_mem(addr: i32, r: i32) {
  1775. instr_660F64(return_on_pagefault!(safe_read128s(addr)), r);
  1776. }
#[no_mangle]
pub unsafe fn instr_0F65(source: u64, r: i32) {
    // pcmpgtw mm, mm/m64
    // Signed word-wise compare; each result word is 0xFFFF when
    // destination > source, otherwise 0.
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result: [u16; 4] = [0; 4];
    for i in 0..4 {
        result[i] = if destination[i] > source[i] { 0xFFFF } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F65_reg(r1: i32, r2: i32) { instr_0F65(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F65_mem(addr: i32, r: i32) {
    instr_0F65(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F65(source: reg128, r: i32) {
    // pcmpgtw xmm, xmm/m128
    // Signed word-wise compare; 0xFFFF on greater-than, 0 otherwise.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u16_0[i] = if destination.i16_0[i] > source.i16_0[i] { 0xFFFF } else { 0 };
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F65_reg(r1: i32, r2: i32) { instr_660F65(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F65_mem(addr: i32, r: i32) {
    instr_660F65(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F66(source: u64, r: i32) {
    // pcmpgtd mm, mm/m64
    // Signed dword-wise compare; each result dword is all-ones (-1) when
    // destination > source, otherwise 0.
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = if destination[i] > source[i] { -1 } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F66_reg(r1: i32, r2: i32) { instr_0F66(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F66_mem(addr: i32, r: i32) {
    instr_0F66(return_on_pagefault!(safe_read64s(addr)), r);
}
  1830. #[no_mangle]
  1831. pub unsafe fn instr_660F66(source: reg128, r: i32) {
  1832. // pcmpgtd xmm, xmm/m128
  1833. // XXX: Aligned access or #gp
  1834. let destination = read_xmm128s(r);
  1835. write_xmm128(
  1836. r,
  1837. if destination.i32_0[0] > source.i32_0[0] { -1 } else { 0 },
  1838. if destination.i32_0[1] > source.i32_0[1] { -1 } else { 0 },
  1839. if destination.i32_0[2] > source.i32_0[2] { -1 } else { 0 },
  1840. if destination.i32_0[3] > source.i32_0[3] { -1 } else { 0 },
  1841. );
  1842. }
  1843. #[no_mangle]
  1844. pub unsafe fn instr_660F66_reg(r1: i32, r2: i32) { instr_660F66(read_xmm128s(r1), r2); }
  1845. #[no_mangle]
  1846. pub unsafe fn instr_660F66_mem(addr: i32, r: i32) {
  1847. instr_660F66(return_on_pagefault!(safe_read128s(addr)), r);
  1848. }
#[no_mangle]
pub unsafe fn instr_0F67(source: u64, r: i32) {
    // packuswb mm, mm/m64
    // Pack eight signed words into eight unsigned-saturated bytes;
    // destination words fill the low half.
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..4 {
        result[i + 0] = saturate_sw_to_ub(destination[i]);
        result[i + 4] = saturate_sw_to_ub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F67_reg(r1: i32, r2: i32) { instr_0F67(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F67_mem(addr: i32, r: i32) {
    instr_0F67(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F67(source: reg128, r: i32) {
    // packuswb xmm, xmm/m128
    // Same operation on sixteen word lanes; destination words fill the
    // low eight result bytes.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u8_0[i + 0] = saturate_sw_to_ub(destination.u16_0[i]);
        result.u8_0[i + 8] = saturate_sw_to_ub(source.u16_0[i]);
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F67_reg(r1: i32, r2: i32) { instr_660F67(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F67_mem(addr: i32, r: i32) {
    instr_660F67(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F68(source: u64, r: i32) {
    // punpckhbw mm, mm/m64
    // Interleave the high four bytes of destination and source
    // (destination byte first in each pair).
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..4 {
        result[2 * i + 0] = destination[i + 4];
        result[2 * i + 1] = source[i + 4];
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F68_reg(r1: i32, r2: i32) { instr_0F68(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F68_mem(addr: i32, r: i32) {
    instr_0F68(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F68(source: reg128, r: i32) {
    // punpckhbw xmm, xmm/m128
    // Interleave the high eight bytes of destination and source.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u8_0[2 * i + 0] = destination.u8_0[i + 8];
        result.u8_0[2 * i + 1] = source.u8_0[i + 8];
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F68_reg(r1: i32, r2: i32) { instr_660F68(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F68_mem(addr: i32, r: i32) {
    instr_660F68(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F69(source: u64, r: i32) {
    // punpckhwd mm, mm/m64
    // Interleave the high two words of destination and source:
    // result = [dst2, src2, dst3, src3].
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let result = [destination[2], source[2], destination[3], source[3]];
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F69_reg(r1: i32, r2: i32) { instr_0F69(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F69_mem(addr: i32, r: i32) {
    instr_0F69(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F69(source: reg128, r: i32) {
    // punpckhwd xmm, xmm/m128
    // Interleave the high four words of destination and source.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..4 {
        result.u16_0[2 * i + 0] = destination.u16_0[i + 4];
        result.u16_0[2 * i + 1] = source.u16_0[i + 4];
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F69_reg(r1: i32, r2: i32) { instr_660F69(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F69_mem(addr: i32, r: i32) {
    instr_660F69(return_on_pagefault!(safe_read128s(addr)), r);
}
  1956. #[no_mangle]
  1957. pub unsafe fn instr_0F6A(source: u64, r: i32) {
  1958. // punpckhdq mm, mm/m64
  1959. let destination = read_mmx64s(r);
  1960. write_mmx_reg64(r, (destination >> 32) | (source >> 32 << 32));
  1961. transition_fpu_to_mmx();
  1962. }
  1963. #[no_mangle]
  1964. pub unsafe fn instr_0F6A_reg(r1: i32, r2: i32) { instr_0F6A(read_mmx64s(r1), r2); }
  1965. #[no_mangle]
  1966. pub unsafe fn instr_0F6A_mem(addr: i32, r: i32) {
  1967. instr_0F6A(return_on_pagefault!(safe_read64s(addr)), r);
  1968. }
#[no_mangle]
pub unsafe fn instr_660F6A(source: reg128, r: i32) {
    // punpckhdq xmm, xmm/m128
    // Interleave the two high dwords: result = [dst2, src2, dst3, src3].
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[2] as i32,
        source.u32_0[2] as i32,
        destination.u32_0[3] as i32,
        source.u32_0[3] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F6A_reg(r1: i32, r2: i32) { instr_660F6A(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6A_mem(addr: i32, r: i32) {
    instr_660F6A(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6B(source: u64, r: i32) {
    // packssdw mm, mm/m64
    // Pack four signed dwords (two from dest, two from source) into four
    // signed-saturated words; destination dwords fill the low half.
    let destination: [u32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [u32; 2] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..2 {
        result[i + 0] = saturate_sd_to_sw(destination[i]);
        result[i + 2] = saturate_sd_to_sw(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F6B_reg(r1: i32, r2: i32) { instr_0F6B(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F6B_mem(addr: i32, r: i32) {
    instr_0F6B(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F6B(source: reg128, r: i32) {
    // packssdw xmm, xmm/m128
    // Same operation on eight dword lanes; destination dwords fill the
    // low four result words.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..4 {
        result.u16_0[i + 0] = saturate_sd_to_sw(destination.u32_0[i]);
        result.u16_0[i + 4] = saturate_sd_to_sw(source.u32_0[i]);
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F6B_reg(r1: i32, r2: i32) { instr_660F6B(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6B_mem(addr: i32, r: i32) {
    instr_660F6B(return_on_pagefault!(safe_read128s(addr)), r);
}
// punpcklqdq exists only with the 66 prefix; plain 0F6C is undefined.
#[no_mangle]
pub unsafe fn instr_0F6C_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F6C_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F6C(source: reg128, r: i32) {
    // punpcklqdq xmm, xmm/m128
    // Low qword of destination stays low; low qword of source becomes
    // the high qword.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[0] as i32,
        destination.u32_0[1] as i32,
        source.u32_0[0] as i32,
        source.u32_0[1] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F6C_reg(r1: i32, r2: i32) { instr_660F6C(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6C_mem(addr: i32, r: i32) {
    instr_660F6C(return_on_pagefault!(safe_read128s(addr)), r);
}
// punpckhqdq exists only with the 66 prefix; plain 0F6D is undefined.
#[no_mangle]
pub unsafe fn instr_0F6D_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F6D_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F6D(source: reg128, r: i32) {
    // punpckhqdq xmm, xmm/m128
    // High qword of destination becomes the low qword; high qword of
    // source becomes the high qword.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[2] as i32,
        destination.u32_0[3] as i32,
        source.u32_0[2] as i32,
        source.u32_0[3] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_660F6D_reg(r1: i32, r2: i32) { instr_660F6D(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6D_mem(addr: i32, r: i32) {
    instr_660F6D(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6E(source: i32, r: i32) {
    // movd mm, r/m32
    // Zero-extend the 32-bit source into the 64-bit mmx register.
    write_mmx_reg64(r, source as u32 as u64);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F6E_reg(r1: i32, r2: i32) { instr_0F6E(read_reg32(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F6E_mem(addr: i32, r: i32) {
    instr_0F6E(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F6E(source: i32, r: i32) {
    // movd xmm, r/m32 — source goes into the low dword, upper 96 bits zeroed.
    write_xmm128(r, source, 0, 0, 0);
}
#[no_mangle]
pub unsafe fn instr_660F6E_reg(r1: i32, r2: i32) { instr_660F6E(read_reg32(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6E_mem(addr: i32, r: i32) {
    instr_660F6E(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6F(source: u64, r: i32) {
    // movq mm, mm/m64
    write_mmx_reg64(r, source);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F6F_reg(r1: i32, r2: i32) { instr_0F6F(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F6F_mem(addr: i32, r: i32) {
    instr_0F6F(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F6F(source: reg128, r: i32) {
    // movdqa xmm, xmm/mem128
    // XXX: Aligned access or #gp
    // XXX: Aligned read or #gp
    mov_rm_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660F6F_reg(r1: i32, r2: i32) { instr_660F6F(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F6F_mem(addr: i32, r: i32) {
    instr_660F6F(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F6F(source: reg128, r: i32) {
    // movdqu xmm, xmm/m128 (unaligned form: no alignment requirement)
    mov_rm_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_F30F6F_reg(r1: i32, r2: i32) { instr_F30F6F(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F30F6F_mem(addr: i32, r: i32) {
    instr_F30F6F(return_on_pagefault!(safe_read128s(addr)), r);
}
  2130. #[no_mangle]
  2131. pub unsafe fn instr_0F70(source: u64, r: i32, imm8: i32) {
  2132. // pshufw mm1, mm2/m64, imm8
  2133. let source: [u16; 4] = std::mem::transmute(source);
  2134. let mut result = [0; 4];
  2135. for i in 0..4 {
  2136. result[i] = source[(imm8 >> (2 * i) & 3) as usize]
  2137. }
  2138. write_mmx_reg64(r, std::mem::transmute(result));
  2139. transition_fpu_to_mmx();
  2140. }
#[no_mangle]
// pshufw: register-operand form.
pub unsafe fn instr_0F70_reg(r1: i32, r2: i32, imm: i32) { instr_0F70(read_mmx64s(r1), r2, imm); }
#[no_mangle]
// pshufw: memory-operand form; bails out on pagefault before touching state.
pub unsafe fn instr_0F70_mem(addr: i32, r: i32, imm: i32) {
    instr_0F70(return_on_pagefault!(safe_read64s(addr)), r, imm);
}
  2147. #[no_mangle]
  2148. pub unsafe fn instr_660F70(source: reg128, r: i32, imm8: i32) {
  2149. // pshufd xmm, xmm/mem128, imm8
  2150. // XXX: Aligned access or #gp
  2151. write_xmm128(
  2152. r,
  2153. source.u32_0[(imm8 & 3) as usize] as i32,
  2154. source.u32_0[(imm8 >> 2 & 3) as usize] as i32,
  2155. source.u32_0[(imm8 >> 4 & 3) as usize] as i32,
  2156. source.u32_0[(imm8 >> 6 & 3) as usize] as i32,
  2157. );
  2158. }
#[no_mangle]
// pshufd: register-operand form.
pub unsafe fn instr_660F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_660F70(read_xmm128s(r1), r2, imm);
}
#[no_mangle]
// pshufd: memory-operand form; bails out on pagefault before touching state.
pub unsafe fn instr_660F70_mem(addr: i32, r: i32, imm: i32) {
    instr_660F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F20F70(source: reg128, r: i32, imm8: i32) {
    // pshuflw xmm, xmm/m128, imm8
    // XXX: Aligned access or #gp
    // Shuffle the four low words of source via imm8 (two words packed per
    // 32-bit argument); the high quadword is copied through unchanged.
    write_xmm128(
        r,
        source.u16_0[(imm8 & 3) as usize] as i32
            | (source.u16_0[(imm8 >> 2 & 3) as usize] as i32) << 16,
        source.u16_0[(imm8 >> 4 & 3) as usize] as i32
            | (source.u16_0[(imm8 >> 6 & 3) as usize] as i32) << 16,
        source.u32_0[2] as i32,
        source.u32_0[3] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_F20F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_F20F70(read_xmm128s(r1), r2, imm);
}
#[no_mangle]
pub unsafe fn instr_F20F70_mem(addr: i32, r: i32, imm: i32) {
    instr_F20F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F30F70(source: reg128, r: i32, imm8: i32) {
    // pshufhw xmm, xmm/m128, imm8
    // XXX: Aligned access or #gp
    // Mirror of pshuflw: the low quadword passes through, and the `| 4`
    // biases each 2-bit selector into the high word range (indices 4..7).
    write_xmm128(
        r,
        source.u32_0[0] as i32,
        source.u32_0[1] as i32,
        source.u16_0[(imm8 & 3 | 4) as usize] as i32
            | (source.u16_0[(imm8 >> 2 & 3 | 4) as usize] as i32) << 16,
        source.u16_0[(imm8 >> 4 & 3 | 4) as usize] as i32
            | (source.u16_0[(imm8 >> 6 & 3 | 4) as usize] as i32) << 16,
    );
}
#[no_mangle]
pub unsafe fn instr_F30F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_F30F70(read_xmm128s(r1), r2, imm);
}
#[no_mangle]
pub unsafe fn instr_F30F70_mem(addr: i32, r: i32, imm: i32) {
    instr_F30F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
// Opcode 0F71 / 660F71: word shifts by immediate (groups 2/4/6 of the modrm
// reg field). The operand must be a register; the memory-encoded forms are
// invalid and raise #UD.
#[no_mangle]
pub unsafe fn instr_0F71_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F71_4_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F71_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F71_2_reg(r: i32, imm8: i32) {
    // psrlw mm, imm8
    psrlw_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F71_4_reg(r: i32, imm8: i32) {
    // psraw mm, imm8
    psraw_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F71_6_reg(r: i32, imm8: i32) {
    // psllw mm, imm8
    psllw_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F71_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F71_4_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F71_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F71_2_reg(r: i32, imm8: i32) {
    // psrlw xmm, imm8
    psrlw_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F71_4_reg(r: i32, imm8: i32) {
    // psraw xmm, imm8
    psraw_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F71_6_reg(r: i32, imm8: i32) {
    // psllw xmm, imm8
    psllw_r128(r, imm8 as u64);
}
// Opcode 0F72 / 660F72: doubleword shifts by immediate. Memory-encoded
// forms are invalid (#UD), mirroring the 0F71 word-shift group above.
#[no_mangle]
pub unsafe fn instr_0F72_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F72_4_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F72_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F72_2_reg(r: i32, imm8: i32) {
    // psrld mm, imm8
    psrld_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F72_4_reg(r: i32, imm8: i32) {
    // psrad mm, imm8
    psrad_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F72_6_reg(r: i32, imm8: i32) {
    // pslld mm, imm8
    pslld_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F72_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F72_4_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F72_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F72_2_reg(r: i32, imm8: i32) {
    // psrld xmm, imm8
    psrld_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F72_4_reg(r: i32, imm8: i32) {
    // psrad xmm, imm8
    psrad_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F72_6_reg(r: i32, imm8: i32) {
    // pslld xmm, imm8
    pslld_r128(r, imm8 as u64);
}
// Opcode 0F73 / 660F73: quadword (and, for SSE, whole-register byte) shifts
// by immediate. Memory-encoded forms are invalid (#UD).
#[no_mangle]
pub unsafe fn instr_0F73_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F73_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F73_2_reg(r: i32, imm8: i32) {
    // psrlq mm, imm8
    psrlq_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F73_6_reg(r: i32, imm8: i32) {
    // psllq mm, imm8
    psllq_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F73_2_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F73_3_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F73_6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F73_7_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F73_2_reg(r: i32, imm8: i32) {
    // psrlq xmm, imm8
    psrlq_r128(r, imm8 as u64);
}
  2322. #[no_mangle]
  2323. pub unsafe fn instr_660F73_3_reg(r: i32, imm8: i32) {
  2324. // psrldq xmm, imm8
  2325. let destination = read_xmm128s(r);
  2326. let mut result = reg128 { i8_0: [0; 16] };
  2327. if imm8 == 0 {
  2328. return;
  2329. }
  2330. let shift = (if imm8 > 15 { 128 } else { imm8 << 3 }) as u32;
  2331. if shift <= 63 {
  2332. result.u64_0[0] = destination.u64_0[0] >> shift | destination.u64_0[1] << (64 - shift);
  2333. result.u64_0[1] = destination.u64_0[1] >> shift
  2334. }
  2335. else if shift <= 127 {
  2336. result.u64_0[0] = destination.u64_0[1] >> shift.wrapping_sub(64);
  2337. result.u64_0[1] = 0
  2338. }
  2339. write_xmm_reg128(r, result);
  2340. }
#[no_mangle]
pub unsafe fn instr_660F73_6_reg(r: i32, imm8: i32) {
    // psllq xmm, imm8
    psllq_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F73_7_reg(r: i32, imm8: i32) {
    // pslldq xmm, imm8
    // Byte-wise logical left shift of the full 128-bit register.
    // A zero count leaves the register unchanged.
    if imm8 == 0 {
        return;
    }
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    // Counts above 15 bytes clear the register entirely (shift = 128).
    let shift = (if imm8 > 15 { 128 } else { imm8 << 3 }) as u32;
    if shift <= 63 {
        // High qword receives bits carried up from the low qword.
        result.u64_0[0] = destination.u64_0[0] << shift;
        result.u64_0[1] = destination.u64_0[1] << shift | destination.u64_0[0] >> (64 - shift)
    }
    else if shift <= 127 {
        // Shift of 64..127 bits: only the low qword contributes.
        result.u64_0[0] = 0;
        result.u64_0[1] = destination.u64_0[0] << shift.wrapping_sub(64)
    }
    write_xmm_reg128(r, result);
}
  2365. #[no_mangle]
  2366. pub unsafe fn instr_0F74(source: u64, r: i32) {
  2367. // pcmpeqb mm, mm/m64
  2368. let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
  2369. let source: [u8; 8] = std::mem::transmute(source);
  2370. let mut result: [u8; 8] = [0; 8];
  2371. for i in 0..8 {
  2372. result[i] = if destination[i] == source[i] { 255 } else { 0 };
  2373. }
  2374. write_mmx_reg64(r, std::mem::transmute(result));
  2375. transition_fpu_to_mmx();
  2376. }
#[no_mangle]
// pcmpeqb (MMX): register-operand form.
pub unsafe fn instr_0F74_reg(r1: i32, r2: i32) { instr_0F74(read_mmx64s(r1), r2); }
#[no_mangle]
// pcmpeqb (MMX): memory-operand form; bails out on pagefault first.
pub unsafe fn instr_0F74_mem(addr: i32, r: i32) {
    instr_0F74(return_on_pagefault!(safe_read64s(addr)), r);
}
  2383. #[no_mangle]
  2384. pub unsafe fn instr_660F74(source: reg128, r: i32) {
  2385. // pcmpeqb xmm, xmm/m128
  2386. // XXX: Aligned access or #gp
  2387. let destination = read_xmm128s(r);
  2388. let mut result = reg128 { i8_0: [0; 16] };
  2389. for i in 0..16 {
  2390. result.u8_0[i] = if source.u8_0[i] == destination.u8_0[i] { 255 } else { 0 }
  2391. }
  2392. write_xmm_reg128(r, result);
  2393. }
#[no_mangle]
// pcmpeqb (SSE2): register-operand form.
pub unsafe fn instr_660F74_reg(r1: i32, r2: i32) { instr_660F74(read_xmm128s(r1), r2); }
#[no_mangle]
// pcmpeqb (SSE2): memory-operand form; bails out on pagefault first.
pub unsafe fn instr_660F74_mem(addr: i32, r: i32) {
    instr_660F74(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F75(source: u64, r: i32) {
    // pcmpeqw mm, mm/m64
    // Word-wise equality mask: 0xFFFF where the words match, 0 elsewhere.
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result: [u16; 4] = [0; 4];
    for i in 0..4 {
        result[i] = if destination[i] == source[i] { 0xFFFF } else { 0 };
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F75_reg(r1: i32, r2: i32) { instr_0F75(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F75_mem(addr: i32, r: i32) {
    instr_0F75(return_on_pagefault!(safe_read64s(addr)), r);
}
  2418. #[no_mangle]
  2419. pub unsafe fn instr_660F75(source: reg128, r: i32) {
  2420. // pcmpeqw xmm, xmm/m128
  2421. // XXX: Aligned access or #gp
  2422. let destination = read_xmm128s(r);
  2423. let mut result = reg128 { i8_0: [0; 16] };
  2424. for i in 0..8 {
  2425. result.u16_0[i] =
  2426. (if source.u16_0[i] as i32 == destination.u16_0[i] as i32 { 0xFFFF } else { 0 }) as u16;
  2427. }
  2428. write_xmm_reg128(r, result);
  2429. }
#[no_mangle]
// pcmpeqw (SSE2): register-operand form.
pub unsafe fn instr_660F75_reg(r1: i32, r2: i32) { instr_660F75(read_xmm128s(r1), r2); }
#[no_mangle]
// pcmpeqw (SSE2): memory-operand form; bails out on pagefault first.
pub unsafe fn instr_660F75_mem(addr: i32, r: i32) {
    instr_660F75(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F76(source: u64, r: i32) {
    // pcmpeqd mm, mm/m64
    // Dword-wise equality mask: -1 (all bits set) on match, 0 otherwise.
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = if destination[i] == source[i] { -1 } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F76_reg(r1: i32, r2: i32) { instr_0F76(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F76_mem(addr: i32, r: i32) {
    instr_0F76(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F76(source: reg128, r: i32) {
    // pcmpeqd xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Dword-wise equality mask: -1 (all bits set) on match, 0 otherwise.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..4 {
        result.i32_0[i] = if source.u32_0[i] == destination.u32_0[i] { -1 } else { 0 }
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660F76_reg(r1: i32, r2: i32) { instr_660F76(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F76_mem(addr: i32, r: i32) {
    instr_660F76(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F77() {
    // emms
    // Mark all FPU tag-word entries empty, leaving MMX state.
    fpu_set_tag_word(0xFFFF);
}
// Opcodes 0F78..0F7D are not implemented; they report as unimplemented SSE.
#[no_mangle]
pub unsafe fn instr_0F78() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F79() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7A() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7B() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7C() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7D() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7E(r: i32) -> i32 {
    // movd r/m32, mm
    // Returns the low 32 bits of the MMX register.
    let data = read_mmx64s(r);
    transition_fpu_to_mmx();
    return data as i32;
}
#[no_mangle]
pub unsafe fn instr_0F7E_reg(r1: i32, r2: i32) { write_reg32(r1, instr_0F7E(r2)); }
#[no_mangle]
pub unsafe fn instr_0F7E_mem(addr: i32, r: i32) {
    return_on_pagefault!(safe_write32(addr, instr_0F7E(r)));
}
#[no_mangle]
pub unsafe fn instr_660F7E(r: i32) -> i32 {
    // movd r/m32, xmm
    // Returns the low 32 bits of the XMM register.
    let data = read_xmm64s(r);
    return data as i32;
}
#[no_mangle]
pub unsafe fn instr_660F7E_reg(r1: i32, r2: i32) { write_reg32(r1, instr_660F7E(r2)); }
#[no_mangle]
pub unsafe fn instr_660F7E_mem(addr: i32, r: i32) {
    return_on_pagefault!(safe_write32(addr, instr_660F7E(r)));
}
#[no_mangle]
pub unsafe fn instr_F30F7E_mem(addr: i32, r: i32) {
    // movq xmm, xmm/mem64
    // Low quadword loaded from memory; high quadword zeroed.
    let data = return_on_pagefault!(safe_read64s(addr));
    write_xmm128_2(r, data, 0);
}
#[no_mangle]
pub unsafe fn instr_F30F7E_reg(r1: i32, r2: i32) {
    // movq xmm, xmm/mem64
    // Low quadword copied from the source register; high quadword zeroed.
    write_xmm128_2(r2, read_xmm64s(r1), 0);
}
#[no_mangle]
pub unsafe fn instr_0F7F(r: i32) -> u64 {
    // movq mm/m64, mm
    // Returns the full 64-bit MMX register value.
    transition_fpu_to_mmx();
    read_mmx64s(r)
}
#[no_mangle]
pub unsafe fn instr_0F7F_mem(addr: i32, r: i32) {
    // movq mm/m64, mm
    mov_r_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F7F_reg(r1: i32, r2: i32) {
    // movq mm/m64, mm
    write_mmx_reg64(r1, read_mmx64s(r2));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660F7F_mem(addr: i32, r: i32) {
    // movdqa xmm/m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_660F7F_reg(r1: i32, r2: i32) {
    // movdqa xmm/m128, xmm
    // XXX: Aligned access or #gp
    mov_r_r128(r1, r2);
}
#[no_mangle]
pub unsafe fn instr_F30F7F_mem(addr: i32, r: i32) {
    // movdqu xmm/m128, xmm (unaligned form: no alignment requirement)
    mov_r_m128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_F30F7F_reg(r1: i32, r2: i32) {
    // movdqu xmm/m128, xmm
    mov_r_r128(r1, r2);
}
// jcc rel16/rel32 (0F80..0F8F): one 16- and one 32-bit operand-size variant
// per condition; negated conditions reuse the base test_* predicate.
pub unsafe fn instr16_0F80(imm: i32) { jmpcc16(test_o(), imm); }
pub unsafe fn instr32_0F80(imm: i32) { jmpcc32(test_o(), imm); }
pub unsafe fn instr16_0F81(imm: i32) { jmpcc16(!test_o(), imm); }
pub unsafe fn instr32_0F81(imm: i32) { jmpcc32(!test_o(), imm); }
pub unsafe fn instr16_0F82(imm: i32) { jmpcc16(test_b(), imm); }
pub unsafe fn instr32_0F82(imm: i32) { jmpcc32(test_b(), imm); }
pub unsafe fn instr16_0F83(imm: i32) { jmpcc16(!test_b(), imm); }
pub unsafe fn instr32_0F83(imm: i32) { jmpcc32(!test_b(), imm); }
pub unsafe fn instr16_0F84(imm: i32) { jmpcc16(test_z(), imm); }
pub unsafe fn instr32_0F84(imm: i32) { jmpcc32(test_z(), imm); }
pub unsafe fn instr16_0F85(imm: i32) { jmpcc16(!test_z(), imm); }
pub unsafe fn instr32_0F85(imm: i32) { jmpcc32(!test_z(), imm); }
pub unsafe fn instr16_0F86(imm: i32) { jmpcc16(test_be(), imm); }
pub unsafe fn instr32_0F86(imm: i32) { jmpcc32(test_be(), imm); }
pub unsafe fn instr16_0F87(imm: i32) { jmpcc16(!test_be(), imm); }
pub unsafe fn instr32_0F87(imm: i32) { jmpcc32(!test_be(), imm); }
pub unsafe fn instr16_0F88(imm: i32) { jmpcc16(test_s(), imm); }
pub unsafe fn instr32_0F88(imm: i32) { jmpcc32(test_s(), imm); }
pub unsafe fn instr16_0F89(imm: i32) { jmpcc16(!test_s(), imm); }
pub unsafe fn instr32_0F89(imm: i32) { jmpcc32(!test_s(), imm); }
pub unsafe fn instr16_0F8A(imm: i32) { jmpcc16(test_p(), imm); }
pub unsafe fn instr32_0F8A(imm: i32) { jmpcc32(test_p(), imm); }
pub unsafe fn instr16_0F8B(imm: i32) { jmpcc16(!test_p(), imm); }
pub unsafe fn instr32_0F8B(imm: i32) { jmpcc32(!test_p(), imm); }
pub unsafe fn instr16_0F8C(imm: i32) { jmpcc16(test_l(), imm); }
pub unsafe fn instr32_0F8C(imm: i32) { jmpcc32(test_l(), imm); }
pub unsafe fn instr16_0F8D(imm: i32) { jmpcc16(!test_l(), imm); }
pub unsafe fn instr32_0F8D(imm: i32) { jmpcc32(!test_l(), imm); }
pub unsafe fn instr16_0F8E(imm: i32) { jmpcc16(test_le(), imm); }
pub unsafe fn instr32_0F8E(imm: i32) { jmpcc32(test_le(), imm); }
pub unsafe fn instr16_0F8F(imm: i32) { jmpcc16(!test_le(), imm); }
pub unsafe fn instr32_0F8F(imm: i32) { jmpcc32(!test_le(), imm); }
// setcc r/m8 (0F90..0F9F), register forms; second argument is unused modrm data.
pub unsafe fn instr_0F90_reg(r: i32, unused: i32) { setcc_reg(test_o(), r); }
pub unsafe fn instr_0F91_reg(r: i32, unused: i32) { setcc_reg(!test_o(), r); }
pub unsafe fn instr_0F92_reg(r: i32, unused: i32) { setcc_reg(test_b(), r); }
pub unsafe fn instr_0F93_reg(r: i32, unused: i32) { setcc_reg(!test_b(), r); }
pub unsafe fn instr_0F94_reg(r: i32, unused: i32) { setcc_reg(test_z(), r); }
pub unsafe fn instr_0F95_reg(r: i32, unused: i32) { setcc_reg(!test_z(), r); }
pub unsafe fn instr_0F96_reg(r: i32, unused: i32) { setcc_reg(test_be(), r); }
pub unsafe fn instr_0F97_reg(r: i32, unused: i32) { setcc_reg(!test_be(), r); }
pub unsafe fn instr_0F98_reg(r: i32, unused: i32) { setcc_reg(test_s(), r); }
pub unsafe fn instr_0F99_reg(r: i32, unused: i32) { setcc_reg(!test_s(), r); }
pub unsafe fn instr_0F9A_reg(r: i32, unused: i32) { setcc_reg(test_p(), r); }
pub unsafe fn instr_0F9B_reg(r: i32, unused: i32) { setcc_reg(!test_p(), r); }
pub unsafe fn instr_0F9C_reg(r: i32, unused: i32) { setcc_reg(test_l(), r); }
pub unsafe fn instr_0F9D_reg(r: i32, unused: i32) { setcc_reg(!test_l(), r); }
pub unsafe fn instr_0F9E_reg(r: i32, unused: i32) { setcc_reg(test_le(), r); }
pub unsafe fn instr_0F9F_reg(r: i32, unused: i32) { setcc_reg(!test_le(), r); }
// setcc r/m8, memory forms.
pub unsafe fn instr_0F90_mem(addr: i32, unused: i32) { setcc_mem(test_o(), addr); }
pub unsafe fn instr_0F91_mem(addr: i32, unused: i32) { setcc_mem(!test_o(), addr); }
pub unsafe fn instr_0F92_mem(addr: i32, unused: i32) { setcc_mem(test_b(), addr); }
pub unsafe fn instr_0F93_mem(addr: i32, unused: i32) { setcc_mem(!test_b(), addr); }
pub unsafe fn instr_0F94_mem(addr: i32, unused: i32) { setcc_mem(test_z(), addr); }
pub unsafe fn instr_0F95_mem(addr: i32, unused: i32) { setcc_mem(!test_z(), addr); }
pub unsafe fn instr_0F96_mem(addr: i32, unused: i32) { setcc_mem(test_be(), addr); }
pub unsafe fn instr_0F97_mem(addr: i32, unused: i32) { setcc_mem(!test_be(), addr); }
pub unsafe fn instr_0F98_mem(addr: i32, unused: i32) { setcc_mem(test_s(), addr); }
pub unsafe fn instr_0F99_mem(addr: i32, unused: i32) { setcc_mem(!test_s(), addr); }
pub unsafe fn instr_0F9A_mem(addr: i32, unused: i32) { setcc_mem(test_p(), addr); }
pub unsafe fn instr_0F9B_mem(addr: i32, unused: i32) { setcc_mem(!test_p(), addr); }
pub unsafe fn instr_0F9C_mem(addr: i32, unused: i32) { setcc_mem(test_l(), addr); }
pub unsafe fn instr_0F9D_mem(addr: i32, unused: i32) { setcc_mem(!test_l(), addr); }
pub unsafe fn instr_0F9E_mem(addr: i32, unused: i32) { setcc_mem(test_le(), addr); }
pub unsafe fn instr_0F9F_mem(addr: i32, unused: i32) { setcc_mem(!test_le(), addr); }
#[no_mangle]
// push fs, 16-bit operand size.
pub unsafe fn instr16_0FA0() {
    return_on_pagefault!(push16(*sreg.offset(FS as isize) as i32));
}
#[no_mangle]
// push fs, 32-bit operand size.
pub unsafe fn instr32_0FA0() {
    return_on_pagefault!(push32(*sreg.offset(FS as isize) as i32));
}
  2635. #[no_mangle]
  2636. pub unsafe fn instr16_0FA1() {
  2637. if !switch_seg(FS, return_on_pagefault!(safe_read16(get_stack_pointer(0)))) {
  2638. return;
  2639. }
  2640. else {
  2641. adjust_stack_reg(2);
  2642. return;
  2643. };
  2644. }
  2645. #[no_mangle]
  2646. pub unsafe fn instr32_0FA1() {
  2647. if !switch_seg(
  2648. FS,
  2649. return_on_pagefault!(safe_read32s(get_stack_pointer(0))) & 0xFFFF,
  2650. ) {
  2651. return;
  2652. }
  2653. else {
  2654. adjust_stack_reg(4);
  2655. return;
  2656. };
  2657. }
#[no_mangle]
pub unsafe fn instr_0FA2() { cpuid(); }
// bt r/m, r (0FA3). Register forms mask the bit offset to the operand width;
// memory forms pass a sign-extended offset (`<< 16 >> 16` sign-extends the
// low 16 bits) so the helper can address bytes relative to addr.
#[no_mangle]
pub unsafe fn instr16_0FA3_reg(r1: i32, r2: i32) { bt_reg(read_reg16(r1), read_reg16(r2) & 15); }
#[no_mangle]
pub unsafe fn instr16_0FA3_mem(addr: i32, r: i32) { bt_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FA3_reg(r1: i32, r2: i32) { bt_reg(read_reg32(r1), read_reg32(r2) & 31); }
#[no_mangle]
pub unsafe fn instr32_0FA3_mem(addr: i32, r: i32) { bt_mem(addr, read_reg32(r)); }
// shld r/m, r, imm8 (0FA4): shift count is taken mod 32.
#[no_mangle]
pub unsafe fn instr16_0FA4_mem(addr: i32, r: i32, imm: i32) {
    SAFE_READ_WRITE16!(___, addr, shld16(___, read_reg16(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr16_0FA4_reg(r1: i32, r: i32, imm: i32) {
    write_reg16(r1, shld16(read_reg16(r1), read_reg16(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FA4_mem(addr: i32, r: i32, imm: i32) {
    SAFE_READ_WRITE32!(___, addr, shld32(___, read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FA4_reg(r1: i32, r: i32, imm: i32) {
    write_reg32(r1, shld32(read_reg32(r1), read_reg32(r), imm & 31));
}
// shld r/m, r, cl (0FA5).
#[no_mangle]
pub unsafe fn instr16_0FA5_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE16!(___, addr, shld16(___, read_reg16(r), read_reg8(CL) & 31));
}
#[no_mangle]
pub unsafe fn instr16_0FA5_reg(r1: i32, r: i32) {
    write_reg16(
        r1,
        shld16(read_reg16(r1), read_reg16(r), read_reg8(CL) & 31),
    );
}
#[no_mangle]
pub unsafe fn instr32_0FA5_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE32!(___, addr, shld32(___, read_reg32(r), read_reg8(CL) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FA5_reg(r1: i32, r: i32) {
    write_reg32(
        r1,
        shld32(read_reg32(r1), read_reg32(r), read_reg8(CL) & 31),
    );
}
#[no_mangle]
pub unsafe fn instr_0FA6() {
    // obsolete cmpxchg (os/2)
    trigger_ud();
}
#[no_mangle]
pub unsafe fn instr_0FA7() { undefined_instruction(); }
#[no_mangle]
// push gs, 16-bit operand size.
pub unsafe fn instr16_0FA8() {
    return_on_pagefault!(push16(*sreg.offset(GS as isize) as i32));
}
#[no_mangle]
// push gs, 32-bit operand size.
pub unsafe fn instr32_0FA8() {
    return_on_pagefault!(push32(*sreg.offset(GS as isize) as i32));
}
  2721. #[no_mangle]
  2722. pub unsafe fn instr16_0FA9() {
  2723. if !switch_seg(GS, return_on_pagefault!(safe_read16(get_stack_pointer(0)))) {
  2724. return;
  2725. }
  2726. else {
  2727. adjust_stack_reg(2);
  2728. return;
  2729. };
  2730. }
  2731. #[no_mangle]
  2732. pub unsafe fn instr32_0FA9() {
  2733. if !switch_seg(
  2734. GS,
  2735. return_on_pagefault!(safe_read32s(get_stack_pointer(0))) & 0xFFFF,
  2736. ) {
  2737. return;
  2738. }
  2739. else {
  2740. adjust_stack_reg(4);
  2741. return;
  2742. };
  2743. }
#[no_mangle]
pub unsafe fn instr_0FAA() {
    // rsm
    undefined_instruction();
}
// bts r/m, r (0FAB): register forms mask the offset; memory forms pass a
// sign-extended 16-bit offset in the 16-bit variant.
#[no_mangle]
pub unsafe fn instr16_0FAB_reg(r1: i32, r2: i32) {
    write_reg16(r1, bts_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FAB_mem(addr: i32, r: i32) { bts_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FAB_reg(r1: i32, r2: i32) {
    write_reg32(r1, bts_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FAB_mem(addr: i32, r: i32) { bts_mem(addr, read_reg32(r)); }
// shrd r/m, r, imm8 (0FAC): shift count is taken mod 32.
#[no_mangle]
pub unsafe fn instr16_0FAC_mem(addr: i32, r: i32, imm: i32) {
    SAFE_READ_WRITE16!(___, addr, shrd16(___, read_reg16(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr16_0FAC_reg(r1: i32, r: i32, imm: i32) {
    write_reg16(r1, shrd16(read_reg16(r1), read_reg16(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FAC_mem(addr: i32, r: i32, imm: i32) {
    SAFE_READ_WRITE32!(___, addr, shrd32(___, read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FAC_reg(r1: i32, r: i32, imm: i32) {
    write_reg32(r1, shrd32(read_reg32(r1), read_reg32(r), imm & 31));
}
// shrd r/m, r, cl (0FAD): shift count comes from CL, taken mod 32.
#[no_mangle]
pub unsafe fn instr16_0FAD_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE16!(___, addr, shrd16(___, read_reg16(r), read_reg8(CL) & 31));
}
#[no_mangle]
pub unsafe fn instr16_0FAD_reg(r1: i32, r: i32) {
    write_reg16(
        r1,
        shrd16(read_reg16(r1), read_reg16(r), read_reg8(CL) & 31),
    );
}
#[no_mangle]
pub unsafe fn instr32_0FAD_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE32!(___, addr, shrd32(___, read_reg32(r), read_reg8(CL) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FAD_reg(r1: i32, r: i32) {
    write_reg32(
        r1,
        shrd32(read_reg32(r1), read_reg32(r), read_reg8(CL) & 31),
    );
}
// Opcode 0FAE group: fxsave/fxrstor/ldmxcsr/stmxcsr and fences, selected by
// the modrm reg field. Register forms of 0..3 are invalid (#UD or
// unimplemented); register forms of 5..7 are the lfence/mfence/sfence no-ops.
#[no_mangle]
pub unsafe fn instr_0FAE_0_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_0_mem(addr: i32) { fxsave(addr); }
#[no_mangle]
pub unsafe fn instr_0FAE_1_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_1_mem(addr: i32) { fxrstor(addr); }
#[no_mangle]
pub unsafe fn instr_0FAE_2_reg(r: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FAE_2_mem(addr: i32) {
    // ldmxcsr
    // Setting any reserved bit raises #GP(0) instead of loading.
    let new_mxcsr = return_on_pagefault!(safe_read32s(addr));
    if 0 != new_mxcsr & !MXCSR_MASK {
        dbg_log!("Invalid mxcsr bits: {:x}", new_mxcsr & !MXCSR_MASK);
        trigger_gp(0);
        return;
    }
    else {
        set_mxcsr(new_mxcsr);
        return;
    };
}
#[no_mangle]
pub unsafe fn instr_0FAE_3_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_3_mem(addr: i32) {
    // stmxcsr
    return_on_pagefault!(safe_write32(addr, *mxcsr));
}
#[no_mangle]
pub unsafe fn instr_0FAE_4_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_4_mem(addr: i32) {
    // xsave
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0FAE_5_reg(_r: i32) {
    // lfence
}
#[no_mangle]
pub unsafe fn instr_0FAE_5_mem(addr: i32) {
    // xrstor
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0FAE_6_reg(_r: i32) {
    // mfence
}
#[no_mangle]
pub unsafe fn instr_0FAE_6_mem(addr: i32) {
    // xsaveopt
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0FAE_7_reg(_r: i32) {
    // sfence
}
#[no_mangle]
pub unsafe fn instr_0FAE_7_mem(addr: i32) {
    // clflush
    undefined_instruction();
}
// imul r, r/m (0FAF): two-operand signed multiply, result written back to
// the destination register.
#[no_mangle]
pub unsafe fn instr16_0FAF_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        imul_reg16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr16_0FAF_reg(r1: i32, r: i32) {
    write_reg16(r, imul_reg16(read_reg16(r), read_reg16(r1)));
}
#[no_mangle]
pub unsafe fn instr32_0FAF_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        imul_reg32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr32_0FAF_reg(r1: i32, r: i32) {
    write_reg32(r, imul_reg32(read_reg32(r), read_reg32(r1)));
}
// cmpxchg r/m, r (0FB0 byte, 0FB1 word/dword). Memory forms use the
// read-modify-write macros so the exchange happens on a single access.
#[no_mangle]
pub unsafe fn instr_0FB0_reg(r1: i32, r2: i32) { write_reg8(r1, cmpxchg8(read_reg8(r1), r2)); }
#[no_mangle]
pub unsafe fn instr_0FB0_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE8!(___, addr, cmpxchg8(___, r));
}
#[no_mangle]
pub unsafe fn instr16_0FB1_reg(r1: i32, r2: i32) { write_reg16(r1, cmpxchg16(read_reg16(r1), r2)); }
#[no_mangle]
pub unsafe fn instr16_0FB1_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE16!(___, addr, cmpxchg16(___, r));
}
#[no_mangle]
pub unsafe fn instr32_0FB1_reg(r1: i32, r2: i32) { write_reg32(r1, cmpxchg32(read_reg32(r1), r2)); }
#[no_mangle]
pub unsafe fn instr32_0FB1_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE32!(___, addr, cmpxchg32(___, r));
}
// 0F B2 (lss), 0F B4 (lfs), 0F B5 (lgs): load a far pointer from memory into
// a register plus the given segment register (SS/FS/GS). These instructions
// only exist with a memory operand; the register forms raise #UD.
#[no_mangle]
pub unsafe fn instr16_0FB2_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB2_mem(addr: i32, r: i32) { lss16(addr, r, SS); }
#[no_mangle]
pub unsafe fn instr32_0FB2_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB2_mem(addr: i32, r: i32) { lss32(addr, r, SS); }
#[no_mangle]
pub unsafe fn instr16_0FB3_reg(r1: i32, r2: i32) {
    // btr r/m16, r16: reset the bit selected by the low 4 bits of r2
    write_reg16(r1, btr_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FB3_mem(addr: i32, r: i32) { btr_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FB3_reg(r1: i32, r2: i32) {
    // btr r/m32, r32: reset the bit selected by the low 5 bits of r2
    write_reg32(r1, btr_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FB3_mem(addr: i32, r: i32) { btr_mem(addr, read_reg32(r)); }
#[no_mangle]
pub unsafe fn instr16_0FB4_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB4_mem(addr: i32, r: i32) { lss16(addr, r, FS); }
#[no_mangle]
pub unsafe fn instr32_0FB4_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB4_mem(addr: i32, r: i32) { lss32(addr, r, FS); }
#[no_mangle]
pub unsafe fn instr16_0FB5_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB5_mem(addr: i32, r: i32) { lss16(addr, r, GS); }
#[no_mangle]
pub unsafe fn instr32_0FB5_reg(unused: i32, unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB5_mem(addr: i32, r: i32) { lss32(addr, r, GS); }
  2940. #[no_mangle]
  2941. pub unsafe fn instr16_0FB6_mem(addr: i32, r: i32) {
  2942. write_reg16(r, return_on_pagefault!(safe_read8(addr)));
  2943. }
  2944. #[no_mangle]
  2945. pub unsafe fn instr16_0FB6_reg(r1: i32, r: i32) { write_reg16(r, read_reg8(r1)); }
  2946. #[no_mangle]
  2947. pub unsafe fn instr32_0FB6_mem(addr: i32, r: i32) {
  2948. write_reg32(r, return_on_pagefault!(safe_read8(addr)));
  2949. }
  2950. #[no_mangle]
  2951. pub unsafe fn instr32_0FB6_reg(r1: i32, r: i32) { write_reg32(r, read_reg8(r1)); }
  2952. #[no_mangle]
  2953. pub unsafe fn instr16_0FB7_mem(addr: i32, r: i32) {
  2954. write_reg16(r, return_on_pagefault!(safe_read16(addr)));
  2955. }
  2956. #[no_mangle]
  2957. pub unsafe fn instr16_0FB7_reg(r1: i32, r: i32) { write_reg16(r, read_reg16(r1)); }
  2958. #[no_mangle]
  2959. pub unsafe fn instr32_0FB7_mem(addr: i32, r: i32) {
  2960. write_reg32(r, return_on_pagefault!(safe_read16(addr)));
  2961. }
  2962. #[no_mangle]
  2963. pub unsafe fn instr32_0FB7_reg(r1: i32, r: i32) { write_reg32(r, read_reg16(r1)); }
// 0F B8: without the F3 prefix this encoding is undefined (#UD); with F3 it
// is popcnt r, r/m -- count of set bits in the operand.
#[no_mangle]
pub unsafe fn instr16_0FB8_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB8_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_F30FB8_mem(addr: i32, r: i32) {
    write_reg16(r, popcnt(return_on_pagefault!(safe_read16(addr))));
}
#[no_mangle]
pub unsafe fn instr16_F30FB8_reg(r1: i32, r: i32) { write_reg16(r, popcnt(read_reg16(r1))); }
#[no_mangle]
pub unsafe fn instr32_0FB8_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB8_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_F30FB8_mem(addr: i32, r: i32) {
    write_reg32(r, popcnt(return_on_pagefault!(safe_read32s(addr))));
}
#[no_mangle]
pub unsafe fn instr32_F30FB8_reg(r1: i32, r: i32) { write_reg32(r, popcnt(read_reg32(r1))); }
#[no_mangle]
pub unsafe fn instr_0FB9() {
    // 0F B9: reserved opcode (documented as ud1 in recent manuals; the
    // original comment said "UD2", which is 0F 0B) -- always raises #UD.
    trigger_ud();
}
// 0F BA /4../7: bit-test group with an immediate bit index --
// /4 bt (test only), /5 bts (test and set), /6 btr (test and reset),
// /7 btc (test and complement). The immediate is masked to the operand
// width (imm & 15 for 16-bit, imm & 31 for 32-bit).
#[no_mangle]
pub unsafe fn instr16_0FBA_4_reg(r: i32, imm: i32) { bt_reg(read_reg16(r), imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_4_mem(addr: i32, imm: i32) { bt_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_5_reg(r: i32, imm: i32) {
    write_reg16(r, bts_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_5_mem(addr: i32, imm: i32) { bts_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_6_reg(r: i32, imm: i32) {
    write_reg16(r, btr_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_6_mem(addr: i32, imm: i32) { btr_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_7_reg(r: i32, imm: i32) {
    write_reg16(r, btc_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_7_mem(addr: i32, imm: i32) { btc_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr32_0FBA_4_reg(r: i32, imm: i32) { bt_reg(read_reg32(r), imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_4_mem(addr: i32, imm: i32) { bt_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_5_reg(r: i32, imm: i32) {
    write_reg32(r, bts_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_5_mem(addr: i32, imm: i32) { bts_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_6_reg(r: i32, imm: i32) {
    write_reg32(r, btr_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_6_mem(addr: i32, imm: i32) { btr_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_7_reg(r: i32, imm: i32) {
    write_reg32(r, btc_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_7_mem(addr: i32, imm: i32) { btc_mem(addr, imm & 31); }
// 0F BB: btc r/m, r -- test and complement a bit. For the register form the
// bit index is masked to the operand width; for the 16-bit memory form the
// index is sign-extended (<< 16 >> 16) so it can address bits before addr.
#[no_mangle]
pub unsafe fn instr16_0FBB_reg(r1: i32, r2: i32) {
    write_reg16(r1, btc_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBB_mem(addr: i32, r: i32) { btc_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FBB_reg(r1: i32, r2: i32) {
    write_reg32(r1, btc_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBB_mem(addr: i32, r: i32) { btc_mem(addr, read_reg32(r)); }
// 0F BC (bsf) / 0F BD (bsr): bit scan forward/reverse. The bsf*/bsr* helpers
// receive the current destination value as well, since the destination is
// architecturally unchanged when the source is zero.
#[no_mangle]
pub unsafe fn instr16_0FBC_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        bsf16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr16_0FBC_reg(r1: i32, r: i32) {
    write_reg16(r, bsf16(read_reg16(r), read_reg16(r1)));
}
#[no_mangle]
pub unsafe fn instr32_0FBC_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        bsf32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr32_0FBC_reg(r1: i32, r: i32) {
    write_reg32(r, bsf32(read_reg32(r), read_reg32(r1)));
}
#[no_mangle]
pub unsafe fn instr16_0FBD_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        bsr16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr16_0FBD_reg(r1: i32, r: i32) {
    write_reg16(r, bsr16(read_reg16(r), read_reg16(r1)));
}
#[no_mangle]
pub unsafe fn instr32_0FBD_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        bsr32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
#[no_mangle]
pub unsafe fn instr32_0FBD_reg(r1: i32, r: i32) {
    write_reg32(r, bsr32(read_reg32(r), read_reg32(r1)));
}
// 0F BE / 0F BF: movsx -- sign-extend an 8- or 16-bit source into a 16- or
// 32-bit destination. `<< 24 >> 24` sign-extends a byte in an i32;
// `<< 16 >> 16` sign-extends a word.
#[no_mangle]
pub unsafe fn instr16_0FBE_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read8(addr)) << 24 >> 24);
}
#[no_mangle]
pub unsafe fn instr16_0FBE_reg(r1: i32, r: i32) { write_reg16(r, read_reg8(r1) << 24 >> 24); }
#[no_mangle]
pub unsafe fn instr32_0FBE_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read8(addr)) << 24 >> 24);
}
#[no_mangle]
pub unsafe fn instr32_0FBE_reg(r1: i32, r: i32) { write_reg32(r, read_reg8(r1) << 24 >> 24); }
#[no_mangle]
pub unsafe fn instr16_0FBF_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read16(addr)) << 16 >> 16);
}
#[no_mangle]
pub unsafe fn instr16_0FBF_reg(r1: i32, r: i32) { write_reg16(r, read_reg16(r1) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FBF_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read16(addr)) << 16 >> 16);
}
#[no_mangle]
pub unsafe fn instr32_0FBF_reg(r1: i32, r: i32) { write_reg32(r, read_reg16(r1) << 16 >> 16); }
// 0F C0 / 0F C1: xadd r/m, r -- exchange-and-add. The xadd* helpers return
// the new destination value; memory forms go through the SAFE_READ_WRITE
// macros for a pagefault-safe read-modify-write.
#[no_mangle]
pub unsafe fn instr_0FC0_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE8!(___, addr, xadd8(___, r));
}
#[no_mangle]
pub unsafe fn instr_0FC0_reg(r1: i32, r: i32) { write_reg8(r1, xadd8(read_reg8(r1), r)); }
#[no_mangle]
pub unsafe fn instr16_0FC1_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE16!(___, addr, xadd16(___, r));
}
#[no_mangle]
pub unsafe fn instr16_0FC1_reg(r1: i32, r: i32) { write_reg16(r1, xadd16(read_reg16(r1), r)); }
#[no_mangle]
pub unsafe fn instr32_0FC1_mem(addr: i32, r: i32) {
    SAFE_READ_WRITE32!(___, addr, xadd32(___, r));
}
#[no_mangle]
pub unsafe fn instr32_0FC1_reg(r1: i32, r: i32) { write_reg32(r1, xadd32(read_reg32(r1), r)); }
// 0F C3: movnti m32, r32 -- non-temporal store, implemented here as a plain
// 32-bit store (no cache-hint modeling). The register form raises #UD.
pub unsafe fn instr_0FC3_reg(r1: i32, r2: i32) { trigger_ud(); }
pub unsafe fn instr_0FC3_mem(addr: i32, r: i32) {
    // movnti
    return_on_pagefault!(safe_write32(addr, read_reg32(r)));
}
#[no_mangle]
pub unsafe fn instr_0FC4(source: i32, r: i32, imm8: i32) {
    // pinsrw mm, r32/m16, imm8: insert the low 16 bits of source into the
    // word lane selected by imm8 & 3 of mm register r.
    let mut destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    destination[(imm8 & 3) as usize] = source as u16;
    write_mmx_reg64(r, std::mem::transmute(destination));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FC4_reg(r1: i32, r2: i32, imm: i32) { instr_0FC4(read_reg32(r1), r2, imm); }
#[no_mangle]
pub unsafe fn instr_0FC4_mem(addr: i32, r: i32, imm: i32) {
    instr_0FC4(return_on_pagefault!(safe_read16(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_660FC4(source: i32, r: i32, imm8: i32) {
    // pinsrw xmm, r32/m16, imm8: same as above but with 8 word lanes,
    // selected by imm8 & 7.
    let mut destination = read_xmm128s(r);
    let index = (imm8 & 7) as u32;
    destination.u16_0[index as usize] = (source & 0xFFFF) as u16;
    write_xmm_reg128(r, destination);
}
#[no_mangle]
pub unsafe fn instr_660FC4_reg(r1: i32, r2: i32, imm: i32) {
    instr_660FC4(read_reg32(r1), r2, imm);
}
#[no_mangle]
pub unsafe fn instr_660FC4_mem(addr: i32, r: i32, imm: i32) {
    instr_660FC4(return_on_pagefault!(safe_read16(addr)), r, imm);
}
// 0F C5: pextrw r32, mm/xmm, imm8 -- extract one 16-bit lane (zero-extended)
// into a general-purpose register. Only the register form exists; the memory
// form raises #UD.
#[no_mangle]
pub unsafe fn instr_0FC5_mem(addr: i32, r: i32, imm8: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FC5_reg(r1: i32, r2: i32, imm8: i32) {
    // pextrw r32, mm, imm8: lane selected by imm8 & 3
    let data: [u16; 4] = std::mem::transmute(read_mmx64s(r1));
    write_reg32(r2, data[(imm8 & 3) as usize] as i32);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660FC5_mem(addr: i32, r: i32, imm8: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FC5_reg(r1: i32, r2: i32, imm8: i32) {
    // pextrw r32, xmm, imm8: lane selected by imm8 & 7
    let data = read_xmm128s(r1);
    let index = (imm8 & 7) as u32;
    let result = data.u16_0[index as usize] as u32;
    write_reg32(r2, result as i32);
}
#[no_mangle]
pub unsafe fn instr_0FC6(source: reg128, r: i32, imm8: i32) {
    // shufps xmm, xmm/mem128: low two result dwords are picked from the
    // destination, high two from the source; each 2-bit field of imm8
    // selects one of four dword lanes.
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32_0[(imm8 & 3) as usize] as i32,
        destination.u32_0[(imm8 >> 2 & 3) as usize] as i32,
        source.u32_0[(imm8 >> 4 & 3) as usize] as i32,
        source.u32_0[(imm8 >> 6 & 3) as usize] as i32,
    );
}
#[no_mangle]
pub unsafe fn instr_0FC6_reg(r1: i32, r2: i32, imm: i32) { instr_0FC6(read_xmm128s(r1), r2, imm); }
#[no_mangle]
pub unsafe fn instr_0FC6_mem(addr: i32, r: i32, imm: i32) {
    instr_0FC6(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_660FC6(source: reg128, r: i32, imm8: i32) {
    // shufpd xmm, xmm/mem128: low qword picked from the destination by
    // imm8 bit 0, high qword from the source by imm8 bit 1.
    let destination = read_xmm128s(r);
    let result = reg128 {
        i64_0: [
            destination.i64_0[imm8 as usize & 1],
            source.i64_0[imm8 as usize >> 1 & 1],
        ],
    };
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FC6_reg(r1: i32, r2: i32, imm: i32) {
    instr_660FC6(read_xmm128s(r1), r2, imm);
}
#[no_mangle]
pub unsafe fn instr_660FC6_mem(addr: i32, r: i32, imm: i32) {
    instr_660FC6(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
// 0F C7 /1: cmpxchg8b m64 -- compare EDX:EAX with the 64-bit memory operand;
// on match store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX
// and clear ZF. Register forms raise #UD.
#[no_mangle]
pub unsafe fn instr16_0FC7_1_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FC7_1_reg(r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FC7_1_mem(addr: i32) {
    // cmpxchg8b
    // Validate writability of all 8 bytes up front; the unwrap()s below
    // cannot fail after this check succeeds.
    return_on_pagefault!(writable_or_pagefault(addr, 8));
    let m64_low = safe_read32s(addr).unwrap();
    let m64_high = safe_read32s(addr + 4).unwrap();
    if read_reg32(EAX) == m64_low && read_reg32(EDX) == m64_high {
        *flags |= FLAG_ZERO;
        safe_write32(addr, read_reg32(EBX)).unwrap();
        safe_write32(addr + 4, read_reg32(ECX)).unwrap();
    }
    else {
        *flags &= !FLAG_ZERO;
        write_reg32(EAX, m64_low);
        write_reg32(EDX, m64_high);
        // Memory is written back even on mismatch (matches the
        // architectural behavior of the implicitly locked instruction).
        safe_write32(addr, m64_low).unwrap();
        safe_write32(addr + 4, m64_high).unwrap();
    }
    // ZF is now set explicitly, so stop deriving it lazily.
    *flags_changed &= !FLAG_ZERO;
}
#[no_mangle]
pub unsafe fn instr32_0FC7_1_mem(addr: i32) { instr16_0FC7_1_mem(addr) }
// 0F C7 /6: rdrand r16/r32 -- write a random value into the destination,
// clear all status flags except CF, and set CF=1 to signal success.
// Only the register form exists; the memory form raises #UD.
#[no_mangle]
pub unsafe fn instr16_0FC7_6_reg(r: i32) {
    // rdrand
    let rand = get_rand_int();
    write_reg16(r, rand);
    *flags &= !FLAGS_ALL;
    *flags |= 1; // CF=1: random value is available
    *flags_changed = 0;
}
#[no_mangle]
pub unsafe fn instr32_0FC7_6_reg(r: i32) {
    // rdrand
    let rand = get_rand_int();
    write_reg32(r, rand);
    *flags &= !FLAGS_ALL;
    *flags |= 1; // CF=1: random value is available
    *flags_changed = 0;
}
#[no_mangle]
pub unsafe fn instr16_0FC7_6_mem(addr: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FC7_6_mem(addr: i32) { trigger_ud(); }
// 0F C8..0F CF: bswap reg -- reverse the byte order of one 32-bit register;
// the opcode's low three bits select the register.
#[no_mangle]
pub unsafe fn instr_0FC8() { bswap(EAX); }
#[no_mangle]
pub unsafe fn instr_0FC9() { bswap(ECX); }
#[no_mangle]
pub unsafe fn instr_0FCA() { bswap(EDX); }
#[no_mangle]
pub unsafe fn instr_0FCB() { bswap(EBX); }
#[no_mangle]
pub unsafe fn instr_0FCC() { bswap(ESP); }
#[no_mangle]
pub unsafe fn instr_0FCD() { bswap(EBP); }
#[no_mangle]
pub unsafe fn instr_0FCE() { bswap(ESI); }
#[no_mangle]
pub unsafe fn instr_0FCF() { bswap(EDI); }
#[no_mangle]
// 0F D0 (addsubps/addsubpd family) is not implemented in this emulator.
pub unsafe fn instr_0FD0() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FD1(source: u64, r: i32) {
    // psrlw mm, mm/m64: logical right shift of each 16-bit lane by the
    // shift count in source (handled by psrlw_r64).
    psrlw_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FD1_reg(r1: i32, r2: i32) { instr_0FD1(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FD1_mem(addr: i32, r: i32) {
    instr_0FD1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD1(source: reg128, r: i32) {
    // psrlw xmm, xmm/m128: the shift count is the low qword of the source.
    // XXX: Aligned access or #gp
    psrlw_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FD1_reg(r1: i32, r2: i32) { instr_660FD1(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FD1_mem(addr: i32, r: i32) {
    instr_660FD1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD2(source: u64, r: i32) {
    // psrld mm, mm/m64: logical right shift of each 32-bit lane.
    psrld_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FD2_reg(r1: i32, r2: i32) { instr_0FD2(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FD2_mem(addr: i32, r: i32) {
    instr_0FD2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD2(source: reg128, r: i32) {
    // psrld xmm, xmm/m128: the shift count is the low qword of the source.
    // XXX: Aligned access or #gp
    psrld_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FD2_reg(r1: i32, r2: i32) { instr_660FD2(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FD2_mem(addr: i32, r: i32) {
    instr_660FD2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD3(source: u64, r: i32) {
    // psrlq mm, mm/m64: logical right shift of the 64-bit value.
    psrlq_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FD3_reg(r1: i32, r2: i32) { instr_0FD3(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FD3_mem(addr: i32, r: i32) {
    instr_0FD3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD3(source: reg128, r: i32) {
    // psrlq xmm, xmm/m128: the shift count is the low qword of the source.
    psrlq_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FD3_reg(r1: i32, r2: i32) { instr_660FD3(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FD3_mem(addr: i32, r: i32) {
    instr_660FD3(return_on_pagefault!(safe_read128s(addr)), r);
}
  3358. #[no_mangle]
  3359. pub unsafe fn instr_0FD4(source: u64, r: i32) {
  3360. // paddq mm, mm/m64
  3361. let destination = read_mmx64s(r);
  3362. write_mmx_reg64(r, source + destination);
  3363. transition_fpu_to_mmx();
  3364. }
  3365. #[no_mangle]
  3366. pub unsafe fn instr_0FD4_reg(r1: i32, r2: i32) { instr_0FD4(read_mmx64s(r1), r2); }
  3367. #[no_mangle]
  3368. pub unsafe fn instr_0FD4_mem(addr: i32, r: i32) {
  3369. instr_0FD4(return_on_pagefault!(safe_read64s(addr)), r);
  3370. }
  3371. #[no_mangle]
  3372. pub unsafe fn instr_660FD4(source: reg128, r: i32) {
  3373. // paddq xmm, xmm/m128
  3374. // XXX: Aligned access or #gp
  3375. let destination = read_xmm128s(r);
  3376. let mut result = reg128 { i8_0: [0; 16] };
  3377. result.u64_0[0] = destination.u64_0[0] + source.u64_0[0];
  3378. result.u64_0[1] = destination.u64_0[1] + source.u64_0[1];
  3379. write_xmm_reg128(r, result);
  3380. }
  3381. #[no_mangle]
  3382. pub unsafe fn instr_660FD4_reg(r1: i32, r2: i32) { instr_660FD4(read_xmm128s(r1), r2); }
  3383. #[no_mangle]
  3384. pub unsafe fn instr_660FD4_mem(addr: i32, r: i32) {
  3385. instr_660FD4(return_on_pagefault!(safe_read128s(addr)), r);
  3386. }
  3387. #[no_mangle]
  3388. pub unsafe fn instr_0FD5(source: u64, r: i32) {
  3389. // pmullw mm, mm/m64
  3390. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  3391. let source: [i16; 4] = std::mem::transmute(source);
  3392. let mut result = [0; 4];
  3393. for i in 0..4 {
  3394. result[i] = destination[i] * source[i];
  3395. }
  3396. write_mmx_reg64(r, std::mem::transmute(result));
  3397. transition_fpu_to_mmx();
  3398. }
  3399. #[no_mangle]
  3400. pub unsafe fn instr_0FD5_reg(r1: i32, r2: i32) { instr_0FD5(read_mmx64s(r1), r2); }
  3401. #[no_mangle]
  3402. pub unsafe fn instr_0FD5_mem(addr: i32, r: i32) {
  3403. instr_0FD5(return_on_pagefault!(safe_read64s(addr)), r);
  3404. }
  3405. #[no_mangle]
  3406. pub unsafe fn instr_660FD5(source: reg128, r: i32) {
  3407. // pmullw xmm, xmm/m128
  3408. // XXX: Aligned access or #gp
  3409. let destination = read_xmm128s(r);
  3410. let mut result = reg128 { i8_0: [0; 16] };
  3411. for i in 0..8 {
  3412. result.u16_0[i] = destination.u16_0[i] * source.u16_0[i]
  3413. }
  3414. write_xmm_reg128(r, result);
  3415. }
  3416. #[no_mangle]
  3417. pub unsafe fn instr_660FD5_reg(r1: i32, r2: i32) { instr_660FD5(read_xmm128s(r1), r2); }
  3418. #[no_mangle]
  3419. pub unsafe fn instr_660FD5_mem(addr: i32, r: i32) {
  3420. instr_660FD5(return_on_pagefault!(safe_read128s(addr)), r);
  3421. }
// 0F D6 group: 66 0F D6 is movq xmm/m64, xmm (store low qword);
// F2 0F D6 is movdq2q mm, xmm; F3 0F D6 is movq2dq xmm, mm.
// The unprefixed form and all memory forms of the prefixed moves that lack
// a memory encoding raise #UD.
#[no_mangle]
pub unsafe fn instr_0FD6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FD6_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FD6_mem(addr: i32, r: i32) {
    // movq xmm/m64, xmm
    movl_r128_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_660FD6_reg(r1: i32, r2: i32) {
    // movq xmm/m64, xmm: copy the low qword of r2 into r1, zeroing the rest
    let data = read_xmm64s(r2);
    write_xmm128_2(r1, data, 0);
}
#[no_mangle]
pub unsafe fn instr_F20FD6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_F20FD6_reg(r1: i32, r2: i32) {
    // movdq2q mm, xmm: low qword of the xmm register into the mm register
    write_mmx_reg64(r2, read_xmm128s(r1).u64_0[0]);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_F30FD6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_F30FD6_reg(r1: i32, r2: i32) {
    // movq2dq xmm, mm: mm register into the low qword, high qword zeroed
    let source = read_mmx64s(r1);
    write_xmm_reg128(r2, reg128 { u64_0: [source, 0] });
    transition_fpu_to_mmx();
}
// 0F D7: pmovmskb r32, mm/xmm -- collect the top bit of each byte lane into
// the low bits of a general-purpose register. Only register forms exist.
#[no_mangle]
pub unsafe fn instr_0FD7_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FD7_reg(r1: i32, r2: i32) {
    // pmovmskb r, mm
    let x: [u8; 8] = std::mem::transmute(read_mmx64s(r1));
    let mut result = 0;
    for i in 0..8 {
        // u8 -> i32 is a zero-extension, so `>> 7` yields the byte's sign
        // bit (0 or 1), which is then placed at bit position i.
        result |= x[i] as i32 >> 7 << i
    }
    write_reg32(r2, result);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660FD7_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FD7_reg(r1: i32, r2: i32) {
    // pmovmskb reg, xmm: same, over 16 byte lanes
    let x = read_xmm128s(r1);
    let mut result = 0;
    for i in 0..16 {
        result |= x.u8_0[i] as i32 >> 7 << i
    }
    write_reg32(r2, result);
}
#[no_mangle]
pub unsafe fn instr_0FD8(source: u64, r: i32) {
    // psubusb mm, mm/m64: lane-wise unsigned byte subtract, saturating at 0.
    // The i32 arithmetic cannot overflow; saturate_sd_to_ub clamps negative
    // differences to 0.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = saturate_sd_to_ub(destination[i] as i32 - source[i] as i32) as u8;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FD8_reg(r1: i32, r2: i32) { instr_0FD8(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FD8_mem(addr: i32, r: i32) {
    instr_0FD8(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD8(source: reg128, r: i32) {
    // psubusb xmm, xmm/m128: same over 16 byte lanes
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..16 {
        result.u8_0[i] =
            saturate_sd_to_ub(destination.u8_0[i] as i32 - source.u8_0[i] as i32) as u8;
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FD8_reg(r1: i32, r2: i32) { instr_660FD8(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FD8_mem(addr: i32, r: i32) {
    instr_660FD8(return_on_pagefault!(safe_read128s(addr)), r);
}
  3514. #[no_mangle]
  3515. pub unsafe fn instr_0FD9(source: u64, r: i32) {
  3516. // psubusw mm, mm/m64
  3517. let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
  3518. let source: [u16; 4] = std::mem::transmute(source);
  3519. let mut result = [0; 4];
  3520. for i in 0..4 {
  3521. result[i] = saturate_uw(destination[i] as u32 - source[i] as u32)
  3522. }
  3523. write_mmx_reg64(r, std::mem::transmute(result));
  3524. transition_fpu_to_mmx();
  3525. }
  3526. #[no_mangle]
  3527. pub unsafe fn instr_0FD9_reg(r1: i32, r2: i32) { instr_0FD9(read_mmx64s(r1), r2); }
  3528. #[no_mangle]
  3529. pub unsafe fn instr_0FD9_mem(addr: i32, r: i32) {
  3530. instr_0FD9(return_on_pagefault!(safe_read64s(addr)), r);
  3531. }
  3532. #[no_mangle]
  3533. pub unsafe fn instr_660FD9(source: reg128, r: i32) {
  3534. // psubusw xmm, xmm/m128
  3535. let destination = read_xmm128s(r);
  3536. let mut result = reg128 { i8_0: [0; 16] };
  3537. for i in 0..8 {
  3538. result.u16_0[i] = saturate_uw(destination.u16_0[i] as u32 - source.u16_0[i] as u32)
  3539. }
  3540. write_xmm_reg128(r, result);
  3541. }
  3542. #[no_mangle]
  3543. pub unsafe fn instr_660FD9_reg(r1: i32, r2: i32) { instr_660FD9(read_xmm128s(r1), r2); }
  3544. #[no_mangle]
  3545. pub unsafe fn instr_660FD9_mem(addr: i32, r: i32) {
  3546. instr_660FD9(return_on_pagefault!(safe_read128s(addr)), r);
  3547. }
#[no_mangle]
pub unsafe fn instr_0FDA(source: u64, r: i32) {
    // pminub mm, mm/m64: lane-wise unsigned byte minimum.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = u8::min(source[i], destination[i])
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDA_reg(r1: i32, r2: i32) { instr_0FDA(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDA_mem(addr: i32, r: i32) {
    instr_0FDA(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDA(source: reg128, r: i32) {
    // pminub xmm, xmm/m128: same over 16 byte lanes
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { u8_0: [0; 16] };
    for i in 0..16 {
        result.u8_0[i] = u8::min(source.u8_0[i], destination.u8_0[i]);
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FDA_reg(r1: i32, r2: i32) { instr_660FDA(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDA_mem(addr: i32, r: i32) {
    instr_660FDA(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDB(source: u64, r: i32) {
    // pand mm, mm/m64: bitwise AND of the two 64-bit values.
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source & destination);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDB_reg(r1: i32, r2: i32) { instr_0FDB(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDB_mem(addr: i32, r: i32) {
    instr_0FDB(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDB(source: reg128, r: i32) {
    // pand xmm, xmm/m128: delegated to the shared 128-bit AND helper.
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660FDB_reg(r1: i32, r2: i32) { instr_660FDB(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDB_mem(addr: i32, r: i32) {
    instr_660FDB(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDC(source: u64, r: i32) {
    // paddusb mm, mm/m64: lane-wise unsigned byte add, saturating at 0xFF.
    // Sums of two u8 values fit in u32, so the addition cannot overflow;
    // saturate_ud_to_ub clamps to the byte range.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = saturate_ud_to_ub(destination[i] as u32 + source[i] as u32);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDC_reg(r1: i32, r2: i32) { instr_0FDC(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDC_mem(addr: i32, r: i32) {
    instr_0FDC(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDC(source: reg128, r: i32) {
    // paddusb xmm, xmm/m128: same over 16 byte lanes
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..16 {
        result.u8_0[i] = saturate_ud_to_ub(source.u8_0[i] as u32 + destination.u8_0[i] as u32);
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FDC_reg(r1: i32, r2: i32) { instr_660FDC(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDC_mem(addr: i32, r: i32) {
    instr_660FDC(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDD(source: u64, r: i32) {
    // paddusw mm, mm/m64: lane-wise unsigned 16-bit add, saturating at
    // 0xFFFF. Sums of two u16 values fit in u32, so no overflow is
    // possible; saturate_uw clamps to the word range.
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = saturate_uw(destination[i] as u32 + source[i] as u32)
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDD_reg(r1: i32, r2: i32) { instr_0FDD(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDD_mem(addr: i32, r: i32) {
    instr_0FDD(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDD(source: reg128, r: i32) {
    // paddusw xmm, xmm/m128: same over 8 word lanes
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u16_0[i] = saturate_uw(source.u16_0[i] as u32 + destination.u16_0[i] as u32)
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FDD_reg(r1: i32, r2: i32) { instr_660FDD(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDD_mem(addr: i32, r: i32) {
    instr_660FDD(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDE(source: u64, r: i32) {
    // pmaxub mm, mm/m64
    // Lane-wise unsigned byte maximum.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = u8::max(source[i], destination[i])
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDE_reg(r1: i32, r2: i32) { instr_0FDE(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDE_mem(addr: i32, r: i32) {
    instr_0FDE(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDE(source: reg128, r: i32) {
    // pmaxub xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Unsigned byte maximum over all 16 byte lanes.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..16 {
        result.u8_0[i] = u8::max(source.u8_0[i], destination.u8_0[i]);
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FDE_reg(r1: i32, r2: i32) { instr_660FDE(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDE_mem(addr: i32, r: i32) {
    instr_660FDE(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDF(source: u64, r: i32) {
    // pandn mm, mm/m64
    // dest = (NOT dest) AND src — note the complement applies to the
    // destination register, not the source operand.
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source & !destination);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FDF_reg(r1: i32, r2: i32) { instr_0FDF(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FDF_mem(addr: i32, r: i32) {
    instr_0FDF(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDF(source: reg128, r: i32) {
    // pandn xmm, xmm/m128
    // XXX: Aligned access or #gp
    // 128-bit variant delegates to the shared pandn helper.
    pandn_r128(source, r);
}
#[no_mangle]
pub unsafe fn instr_660FDF_reg(r1: i32, r2: i32) { instr_660FDF(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FDF_mem(addr: i32, r: i32) {
    instr_660FDF(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE0(source: u64, r: i32) {
    // pavgb mm, mm/m64
    // Unsigned byte average with rounding: (a + b + 1) >> 1.
    // Max intermediate is 255 + 255 + 1, so the i32 arithmetic cannot overflow.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = (destination[i] as i32 + source[i] as i32 + 1 >> 1) as u8;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FE0_reg(r1: i32, r2: i32) { instr_0FE0(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE0_mem(addr: i32, r: i32) {
    instr_0FE0(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE0(source: reg128, r: i32) {
    // pavgb xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Rounding byte average over all 16 lanes.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..16 {
        result.u8_0[i] = (destination.u8_0[i] as i32 + source.u8_0[i] as i32 + 1 >> 1) as u8;
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FE0_reg(r1: i32, r2: i32) { instr_660FE0(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FE0_mem(addr: i32, r: i32) {
    instr_660FE0(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE1(source: u64, r: i32) {
    // psraw mm, mm/m64
    // Arithmetic right shift of word lanes; count comes from the full
    // 64-bit source operand (helper defined elsewhere).
    psraw_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FE1_reg(r1: i32, r2: i32) { instr_0FE1(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE1_mem(addr: i32, r: i32) {
    instr_0FE1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE1(source: reg128, r: i32) {
    // psraw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Only the low 64 bits of the 128-bit operand supply the shift count.
    psraw_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FE1_reg(r1: i32, r2: i32) { instr_660FE1(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FE1_mem(addr: i32, r: i32) {
    instr_660FE1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE2(source: u64, r: i32) {
    // psrad mm, mm/m64
    // Arithmetic right shift of dword lanes (helper defined elsewhere).
    psrad_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FE2_reg(r1: i32, r2: i32) { instr_0FE2(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE2_mem(addr: i32, r: i32) {
    instr_0FE2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE2(source: reg128, r: i32) {
    // psrad xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Only the low 64 bits of the 128-bit operand supply the shift count.
    psrad_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FE2_reg(r1: i32, r2: i32) { instr_660FE2(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FE2_mem(addr: i32, r: i32) {
    instr_660FE2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE3(source: u64, r: i32) {
    // pavgw mm, mm/m64
    // Unsigned word average with rounding: (a + b + 1) >> 1.
    // Max intermediate 0xFFFF + 0xFFFF + 1 fits in i32.
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = (destination[i] as i32 + source[i] as i32 + 1 >> 1) as u16
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FE3_reg(r1: i32, r2: i32) { instr_0FE3(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE3_mem(addr: i32, r: i32) {
    instr_0FE3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE3(source: reg128, r: i32) {
    // pavgw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Updates the destination copy in place, then writes it back whole.
    let mut destination = read_xmm128s(r);
    for i in 0..8 {
        destination.u16_0[i] =
            (destination.u16_0[i] as i32 + source.u16_0[i] as i32 + 1 >> 1) as u16;
    }
    write_xmm_reg128(r, destination);
}
#[no_mangle]
pub unsafe fn instr_660FE3_reg(r1: i32, r2: i32) { instr_660FE3(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FE3_mem(addr: i32, r: i32) {
    instr_660FE3(return_on_pagefault!(safe_read128s(addr)), r);
}
  3854. #[no_mangle]
  3855. pub unsafe fn instr_0FE4(source: u64, r: i32) {
  3856. // pmulhuw mm, mm/m64
  3857. let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
  3858. let source: [u16; 4] = std::mem::transmute(source);
  3859. let mut result = [0; 4];
  3860. for i in 0..4 {
  3861. result[i] = ((destination[i] as i32 * source[i] as i32) >> 16) as u16
  3862. }
  3863. write_mmx_reg64(r, std::mem::transmute(result));
  3864. transition_fpu_to_mmx();
  3865. }
  3866. #[no_mangle]
  3867. pub unsafe fn instr_0FE4_reg(r1: i32, r2: i32) { instr_0FE4(read_mmx64s(r1), r2); }
  3868. #[no_mangle]
  3869. pub unsafe fn instr_0FE4_mem(addr: i32, r: i32) {
  3870. instr_0FE4(return_on_pagefault!(safe_read64s(addr)), r);
  3871. }
  3872. #[no_mangle]
  3873. pub unsafe fn instr_660FE4(source: reg128, r: i32) {
  3874. // pmulhuw xmm, xmm/m128
  3875. // XXX: Aligned access or #gp
  3876. let destination = read_xmm128s(r);
  3877. let mut result = reg128 { i8_0: [0; 16] };
  3878. for i in 0..8 {
  3879. result.u16_0[i] = (source.u16_0[i] as i32 * destination.u16_0[i] as i32 >> 16) as u16;
  3880. }
  3881. write_xmm_reg128(r, result);
  3882. }
  3883. #[no_mangle]
  3884. pub unsafe fn instr_660FE4_reg(r1: i32, r2: i32) { instr_660FE4(read_xmm128s(r1), r2); }
  3885. #[no_mangle]
  3886. pub unsafe fn instr_660FE4_mem(addr: i32, r: i32) {
  3887. instr_660FE4(return_on_pagefault!(safe_read128s(addr)), r);
  3888. }
#[no_mangle]
pub unsafe fn instr_0FE5(source: u64, r: i32) {
    // pmulhw mm, mm/m64
    // Lane-wise signed 16-bit multiply, keeping the high 16 bits.
    // i16 * i16 always fits in i32 (|product| <= 2^30), so no overflow here.
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = ((destination[i] as i32 * source[i] as i32) >> 16) as i16
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FE5_reg(r1: i32, r2: i32) { instr_0FE5(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE5_mem(addr: i32, r: i32) {
    instr_0FE5(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE5(source: reg128, r: i32) {
    // pmulhw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Same signed high-half multiply over the eight word lanes; the cast to
    // u16 only reinterprets bits for storage in the union.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.u16_0[i] = (destination.i16_0[i] as i32 * source.i16_0[i] as i32 >> 16) as u16
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FE5_reg(r1: i32, r2: i32) { instr_660FE5(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FE5_mem(addr: i32, r: i32) {
    instr_660FE5(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
// 0F E6 without a mandatory prefix is an invalid encoding: #UD.
pub unsafe fn instr_0FE6_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FE6_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FE6(source: reg128, r: i32) {
    // cvttpd2dq xmm1, xmm2/m128
    // Convert two packed doubles to dwords with truncation; the upper two
    // dword lanes of the destination are zeroed.
    let result = reg128 {
        i32_0: [
            sse_convert_with_truncation_f64_to_i32(source.f64_0[0]),
            sse_convert_with_truncation_f64_to_i32(source.f64_0[1]),
            0,
            0,
        ],
    };
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FE6_mem(addr: i32, r: i32) {
    instr_660FE6(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE6_reg(r1: i32, r2: i32) { instr_660FE6(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F20FE6(source: reg128, r: i32) {
    // cvtpd2dq xmm1, xmm2/m128
    // Convert two packed doubles to dwords using the current rounding mode;
    // upper two dword lanes are zeroed.
    let result = reg128 {
        i32_0: [
            // XXX: Precision exception
            sse_convert_f64_to_i32(source.f64_0[0]),
            sse_convert_f64_to_i32(source.f64_0[1]),
            0,
            0,
        ],
    };
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_F20FE6_mem(addr: i32, r: i32) {
    instr_F20FE6(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20FE6_reg(r1: i32, r2: i32) { instr_F20FE6(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F30FE6(source: u64, r: i32) {
    // cvtdq2pd xmm1, xmm2/m64
    // Widen two packed dwords to doubles.
    let result = reg128 {
        f64_0: [
            // Note: Conversion never fails (i32 fits into f64)
            source as i32 as f64,
            (source >> 32) as i32 as f64,
        ],
    };
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_F30FE6_mem(addr: i32, r: i32) {
    instr_F30FE6(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30FE6_reg(r1: i32, r2: i32) { instr_F30FE6(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FE7_mem(addr: i32, r: i32) {
    // movntq m64, mm
    // Non-temporal hint is ignored here; implemented as a plain 64-bit store.
    mov_r_m64(addr, r);
}
#[no_mangle]
// movntq only has a memory form; a register operand is #UD.
pub unsafe fn instr_0FE7_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
// movntdq likewise has no register form.
pub unsafe fn instr_660FE7_reg(r1: i32, r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FE7_mem(addr: i32, r: i32) {
    // movntdq m128, xmm
    mov_r_m128(addr, r);
}
  3999. #[no_mangle]
  4000. pub unsafe fn instr_0FE8(source: u64, r: i32) {
  4001. // psubsb mm, mm/m64
  4002. let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
  4003. let source: [i8; 8] = std::mem::transmute(source);
  4004. let mut result = [0; 8];
  4005. for i in 0..8 {
  4006. result[i] = saturate_sd_to_sb(destination[i] as u32 - source[i] as u32);
  4007. }
  4008. write_mmx_reg64(r, std::mem::transmute(result));
  4009. transition_fpu_to_mmx();
  4010. }
  4011. #[no_mangle]
  4012. pub unsafe fn instr_0FE8_reg(r1: i32, r2: i32) { instr_0FE8(read_mmx64s(r1), r2); }
  4013. #[no_mangle]
  4014. pub unsafe fn instr_0FE8_mem(addr: i32, r: i32) {
  4015. instr_0FE8(return_on_pagefault!(safe_read64s(addr)), r);
  4016. }
  4017. #[no_mangle]
  4018. pub unsafe fn instr_660FE8(source: reg128, r: i32) {
  4019. // psubsb xmm, xmm/m128
  4020. // XXX: Aligned access or #gp
  4021. let destination = read_xmm128s(r);
  4022. let mut result = reg128 { i8_0: [0; 16] };
  4023. for i in 0..16 {
  4024. result.i8_0[i] = saturate_sd_to_sb(destination.i8_0[i] as u32 - source.i8_0[i] as u32);
  4025. }
  4026. write_xmm_reg128(r, result);
  4027. }
  4028. #[no_mangle]
  4029. pub unsafe fn instr_660FE8_reg(r1: i32, r2: i32) { instr_660FE8(read_xmm128s(r1), r2); }
  4030. #[no_mangle]
  4031. pub unsafe fn instr_660FE8_mem(addr: i32, r: i32) {
  4032. instr_660FE8(return_on_pagefault!(safe_read128s(addr)), r);
  4033. }
  4034. #[no_mangle]
  4035. pub unsafe fn instr_0FE9(source: u64, r: i32) {
  4036. // psubsw mm, mm/m64
  4037. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  4038. let source: [i16; 4] = std::mem::transmute(source);
  4039. let mut result = [0; 4];
  4040. for i in 0..4 {
  4041. result[i] = saturate_sd_to_sw(destination[i] as u32 - source[i] as u32)
  4042. }
  4043. write_mmx_reg64(r, std::mem::transmute(result));
  4044. transition_fpu_to_mmx();
  4045. }
  4046. #[no_mangle]
  4047. pub unsafe fn instr_0FE9_reg(r1: i32, r2: i32) { instr_0FE9(read_mmx64s(r1), r2); }
  4048. #[no_mangle]
  4049. pub unsafe fn instr_0FE9_mem(addr: i32, r: i32) {
  4050. instr_0FE9(return_on_pagefault!(safe_read64s(addr)), r);
  4051. }
  4052. #[no_mangle]
  4053. pub unsafe fn instr_660FE9(source: reg128, r: i32) {
  4054. // psubsw xmm, xmm/m128
  4055. // XXX: Aligned access or #gp
  4056. let destination = read_xmm128s(r);
  4057. let mut result = reg128 { i8_0: [0; 16] };
  4058. for i in 0..8 {
  4059. result.u16_0[i] = saturate_sd_to_sw(destination.i16_0[i] as u32 - source.i16_0[i] as u32)
  4060. }
  4061. write_xmm_reg128(r, result);
  4062. }
  4063. #[no_mangle]
  4064. pub unsafe fn instr_660FE9_reg(r1: i32, r2: i32) { instr_660FE9(read_xmm128s(r1), r2); }
  4065. #[no_mangle]
  4066. pub unsafe fn instr_660FE9_mem(addr: i32, r: i32) {
  4067. instr_660FE9(return_on_pagefault!(safe_read128s(addr)), r);
  4068. }
  4069. #[no_mangle]
  4070. pub unsafe fn instr_0FEA(source: u64, r: i32) {
  4071. // pminsw mm, mm/m64
  4072. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  4073. let source: [i16; 4] = std::mem::transmute(source);
  4074. let mut result = [0; 4];
  4075. for i in 0..4 {
  4076. result[i] = i16::min(destination[i], source[i])
  4077. }
  4078. write_mmx_reg64(r, std::mem::transmute(result));
  4079. transition_fpu_to_mmx();
  4080. }
  4081. #[no_mangle]
  4082. pub unsafe fn instr_0FEA_reg(r1: i32, r2: i32) { instr_0FEA(read_mmx64s(r1), r2); }
  4083. #[no_mangle]
  4084. pub unsafe fn instr_0FEA_mem(addr: i32, r: i32) {
  4085. instr_0FEA(return_on_pagefault!(safe_read64s(addr)), r);
  4086. }
  4087. #[no_mangle]
  4088. pub unsafe fn instr_660FEA(source: reg128, r: i32) {
  4089. // pminsw xmm, xmm/m128
  4090. // XXX: Aligned access or #gp
  4091. let destination = read_xmm128s(r);
  4092. let mut result = reg128 { i8_0: [0; 16] };
  4093. for i in 0..8 {
  4094. result.i16_0[i] = i16::min(destination.i16_0[i], source.i16_0[i])
  4095. }
  4096. write_xmm_reg128(r, result);
  4097. }
  4098. #[no_mangle]
  4099. pub unsafe fn instr_660FEA_reg(r1: i32, r2: i32) { instr_660FEA(read_xmm128s(r1), r2); }
  4100. #[no_mangle]
  4101. pub unsafe fn instr_660FEA_mem(addr: i32, r: i32) {
  4102. instr_660FEA(return_on_pagefault!(safe_read128s(addr)), r);
  4103. }
  4104. #[no_mangle]
  4105. pub unsafe fn instr_0FEB(source: u64, r: i32) {
  4106. // por mm, mm/m64
  4107. let destination = read_mmx64s(r);
  4108. write_mmx_reg64(r, source | destination);
  4109. transition_fpu_to_mmx();
  4110. }
  4111. #[no_mangle]
  4112. pub unsafe fn instr_0FEB_reg(r1: i32, r2: i32) { instr_0FEB(read_mmx64s(r1), r2); }
  4113. #[no_mangle]
  4114. pub unsafe fn instr_0FEB_mem(addr: i32, r: i32) {
  4115. instr_0FEB(return_on_pagefault!(safe_read64s(addr)), r);
  4116. }
  4117. #[no_mangle]
  4118. pub unsafe fn instr_660FEB(source: reg128, r: i32) {
  4119. // por xmm, xmm/m128
  4120. // XXX: Aligned access or #gp
  4121. por_r128(source, r);
  4122. }
  4123. #[no_mangle]
  4124. pub unsafe fn instr_660FEB_reg(r1: i32, r2: i32) { instr_660FEB(read_xmm128s(r1), r2); }
  4125. #[no_mangle]
  4126. pub unsafe fn instr_660FEB_mem(addr: i32, r: i32) {
  4127. instr_660FEB(return_on_pagefault!(safe_read128s(addr)), r);
  4128. }
  4129. #[no_mangle]
  4130. pub unsafe fn instr_0FEC(source: u64, r: i32) {
  4131. // paddsb mm, mm/m64
  4132. let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
  4133. let source: [i8; 8] = std::mem::transmute(source);
  4134. let mut result = [0; 8];
  4135. for i in 0..8 {
  4136. result[i] = saturate_sd_to_sb(destination[i] as u32 + source[i] as u32);
  4137. }
  4138. write_mmx_reg64(r, std::mem::transmute(result));
  4139. transition_fpu_to_mmx();
  4140. }
  4141. #[no_mangle]
  4142. pub unsafe fn instr_0FEC_reg(r1: i32, r2: i32) { instr_0FEC(read_mmx64s(r1), r2); }
  4143. #[no_mangle]
  4144. pub unsafe fn instr_0FEC_mem(addr: i32, r: i32) {
  4145. instr_0FEC(return_on_pagefault!(safe_read64s(addr)), r);
  4146. }
  4147. #[no_mangle]
  4148. pub unsafe fn instr_660FEC(source: reg128, r: i32) {
  4149. // paddsb xmm, xmm/m128
  4150. // XXX: Aligned access or #gp
  4151. let destination = read_xmm128s(r);
  4152. let mut result = reg128 { i8_0: [0; 16] };
  4153. for i in 0..16 {
  4154. result.i8_0[i] = saturate_sd_to_sb(destination.i8_0[i] as u32 + source.i8_0[i] as u32);
  4155. }
  4156. write_xmm_reg128(r, result);
  4157. }
  4158. #[no_mangle]
  4159. pub unsafe fn instr_660FEC_reg(r1: i32, r2: i32) { instr_660FEC(read_xmm128s(r1), r2); }
  4160. #[no_mangle]
  4161. pub unsafe fn instr_660FEC_mem(addr: i32, r: i32) {
  4162. instr_660FEC(return_on_pagefault!(safe_read128s(addr)), r);
  4163. }
  4164. #[no_mangle]
  4165. pub unsafe fn instr_0FED(source: u64, r: i32) {
  4166. // paddsw mm, mm/m64
  4167. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  4168. let source: [i16; 4] = std::mem::transmute(source);
  4169. let mut result = [0; 4];
  4170. for i in 0..4 {
  4171. result[i] = saturate_sd_to_sw(destination[i] as u32 + source[i] as u32)
  4172. }
  4173. write_mmx_reg64(r, std::mem::transmute(result));
  4174. transition_fpu_to_mmx();
  4175. }
  4176. #[no_mangle]
  4177. pub unsafe fn instr_0FED_reg(r1: i32, r2: i32) { instr_0FED(read_mmx64s(r1), r2); }
  4178. #[no_mangle]
  4179. pub unsafe fn instr_0FED_mem(addr: i32, r: i32) {
  4180. instr_0FED(return_on_pagefault!(safe_read64s(addr)), r);
  4181. }
  4182. #[no_mangle]
  4183. pub unsafe fn instr_660FED(source: reg128, r: i32) {
  4184. // paddsw xmm, xmm/m128
  4185. // XXX: Aligned access or #gp
  4186. let destination = read_xmm128s(r);
  4187. let mut result = reg128 { i8_0: [0; 16] };
  4188. for i in 0..8 {
  4189. result.u16_0[i] = saturate_sd_to_sw(destination.i16_0[i] as u32 + source.i16_0[i] as u32)
  4190. }
  4191. write_xmm_reg128(r, result);
  4192. }
  4193. #[no_mangle]
  4194. pub unsafe fn instr_660FED_reg(r1: i32, r2: i32) { instr_660FED(read_xmm128s(r1), r2); }
  4195. #[no_mangle]
  4196. pub unsafe fn instr_660FED_mem(addr: i32, r: i32) {
  4197. instr_660FED(return_on_pagefault!(safe_read128s(addr)), r);
  4198. }
#[no_mangle]
pub unsafe fn instr_0FEE(source: u64, r: i32) {
    // pmaxsw mm, mm/m64
    // Lane-wise signed word maximum.
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = i16::max(destination[i], source[i])
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FEE_reg(r1: i32, r2: i32) { instr_0FEE(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FEE_mem(addr: i32, r: i32) {
    instr_0FEE(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEE(source: reg128, r: i32) {
    // pmaxsw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Signed word maximum over the eight word lanes.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    for i in 0..8 {
        result.i16_0[i] = i16::max(destination.i16_0[i], source.i16_0[i])
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FEE_reg(r1: i32, r2: i32) { instr_660FEE(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FEE_mem(addr: i32, r: i32) {
    instr_660FEE(return_on_pagefault!(safe_read128s(addr)), r);
}
  4234. #[no_mangle]
  4235. pub unsafe fn instr_0FEF(source: u64, r: i32) {
  4236. // pxor mm, mm/m64
  4237. let destination = read_mmx64s(r);
  4238. write_mmx_reg64(r, source ^ destination);
  4239. transition_fpu_to_mmx();
  4240. }
  4241. #[no_mangle]
  4242. pub unsafe fn instr_0FEF_reg(r1: i32, r2: i32) { instr_0FEF(read_mmx64s(r1), r2); }
  4243. #[no_mangle]
  4244. pub unsafe fn instr_0FEF_mem(addr: i32, r: i32) {
  4245. instr_0FEF(return_on_pagefault!(safe_read64s(addr)), r);
  4246. }
  4247. #[no_mangle]
  4248. pub unsafe fn instr_660FEF(source: reg128, r: i32) {
  4249. // pxor xmm, xmm/m128
  4250. // XXX: Aligned access or #gp
  4251. pxor_r128(source, r);
  4252. }
  4253. #[no_mangle]
  4254. pub unsafe fn instr_660FEF_reg(r1: i32, r2: i32) { instr_660FEF(read_xmm128s(r1), r2); }
  4255. #[no_mangle]
  4256. pub unsafe fn instr_660FEF_mem(addr: i32, r: i32) {
  4257. instr_660FEF(return_on_pagefault!(safe_read128s(addr)), r);
  4258. }
#[no_mangle]
// 0F F0 (lddqu family) — routed to the generic unimplemented-SSE handler.
// NOTE(review): presumably this raises the emulator's unimplemented trap; confirm
// whether the F2-prefixed lddqu form is decoded elsewhere.
pub unsafe fn instr_0FF0() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FF1(source: u64, r: i32) {
    // psllw mm, mm/m64
    // Logical left shift of word lanes; helper defined elsewhere.
    psllw_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FF1_reg(r1: i32, r2: i32) { instr_0FF1(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FF1_mem(addr: i32, r: i32) {
    instr_0FF1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF1(source: reg128, r: i32) {
    // psllw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Only the low 64 bits of the 128-bit operand supply the shift count.
    psllw_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FF1_reg(r1: i32, r2: i32) { instr_660FF1(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FF1_mem(addr: i32, r: i32) {
    instr_660FF1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF2(source: u64, r: i32) {
    // pslld mm, mm/m64
    // Logical left shift of dword lanes; helper defined elsewhere.
    pslld_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FF2_reg(r1: i32, r2: i32) { instr_0FF2(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FF2_mem(addr: i32, r: i32) {
    instr_0FF2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF2(source: reg128, r: i32) {
    // pslld xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Only the low 64 bits of the 128-bit operand supply the shift count.
    pslld_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FF2_reg(r1: i32, r2: i32) { instr_660FF2(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FF2_mem(addr: i32, r: i32) {
    instr_660FF2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF3(source: u64, r: i32) {
    // psllq mm, mm/m64
    // Logical left shift of the whole 64-bit register; helper defined elsewhere.
    psllq_r64(r, source);
}
#[no_mangle]
pub unsafe fn instr_0FF3_reg(r1: i32, r2: i32) { instr_0FF3(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FF3_mem(addr: i32, r: i32) {
    instr_0FF3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF3(source: reg128, r: i32) {
    // psllq xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Only the low 64 bits of the 128-bit operand supply the shift count.
    psllq_r128(r, source.u64_0[0]);
}
#[no_mangle]
pub unsafe fn instr_660FF3_reg(r1: i32, r2: i32) { instr_660FF3(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FF3_mem(addr: i32, r: i32) {
    instr_660FF3(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF4(source: u64, r: i32) {
    // pmuludq mm, mm/m64
    // Unsigned multiply of the low dwords, producing a full 64-bit result.
    // u32 * u32 always fits in u64, so wrapping_mul never actually wraps here.
    let destination = read_mmx64s(r);
    write_mmx_reg64(
        r,
        (source as u32 as u64).wrapping_mul(destination as u32 as u64),
    );
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FF4_reg(r1: i32, r2: i32) { instr_0FF4(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FF4_mem(addr: i32, r: i32) {
    instr_0FF4(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF4(source: reg128, r: i32) {
    // pmuludq xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Multiplies dword lanes 0 and 2 (the even lanes) into two 64-bit results;
    // lanes 1 and 3 are ignored, as the instruction specifies.
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8_0: [0; 16] };
    result.u64_0[0] = source.u32_0[0] as u64 * destination.u32_0[0] as u64;
    result.u64_0[1] = source.u32_0[2] as u64 * destination.u32_0[2] as u64;
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_660FF4_reg(r1: i32, r2: i32) { instr_660FF4(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FF4_mem(addr: i32, r: i32) {
    instr_660FF4(return_on_pagefault!(safe_read128s(addr)), r);
}
  4362. #[no_mangle]
  4363. pub unsafe fn instr_0FF5(source: u64, r: i32) {
  4364. // pmaddwd mm, mm/m64
  4365. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  4366. let source: [i16; 4] = std::mem::transmute(source);
  4367. let mul0 = destination[0] as i32 * source[0] as i32;
  4368. let mul1 = destination[1] as i32 * source[1] as i32;
  4369. let mul2 = destination[2] as i32 * source[2] as i32;
  4370. let mul3 = destination[3] as i32 * source[3] as i32;
  4371. let low = mul0 + mul1;
  4372. let high = mul2 + mul3;
  4373. write_mmx_reg64(r, low as u32 as u64 | (high as u64) << 32);
  4374. transition_fpu_to_mmx();
  4375. }
  4376. #[no_mangle]
  4377. pub unsafe fn instr_0FF5_reg(r1: i32, r2: i32) { instr_0FF5(read_mmx64s(r1), r2); }
  4378. #[no_mangle]
  4379. pub unsafe fn instr_0FF5_mem(addr: i32, r: i32) {
  4380. instr_0FF5(return_on_pagefault!(safe_read64s(addr)), r);
  4381. }
  4382. #[no_mangle]
  4383. pub unsafe fn instr_660FF5(source: reg128, r: i32) {
  4384. // pmaddwd xmm, xmm/m128
  4385. // XXX: Aligned access or #gp
  4386. let destination = read_xmm128s(r);
  4387. let mut result = reg128 { i8_0: [0; 16] };
  4388. for i in 0..4 {
  4389. result.i32_0[i] = destination.i16_0[2 * i] as i32 * source.i16_0[2 * i] as i32
  4390. + destination.i16_0[2 * i + 1] as i32 * source.i16_0[2 * i + 1] as i32
  4391. }
  4392. write_xmm_reg128(r, result);
  4393. }
  4394. #[no_mangle]
  4395. pub unsafe fn instr_660FF5_reg(r1: i32, r2: i32) { instr_660FF5(read_xmm128s(r1), r2); }
  4396. #[no_mangle]
  4397. pub unsafe fn instr_660FF5_mem(addr: i32, r: i32) {
  4398. instr_660FF5(return_on_pagefault!(safe_read128s(addr)), r);
  4399. }
#[no_mangle]
pub unsafe fn instr_0FF6(source: u64, r: i32) {
    // psadbw mm, mm/m64
    // Sum of absolute byte differences; the 16-bit sum (max 8 * 255) is
    // written to the destination with the upper bits zeroed.
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut sum = 0;
    for i in 0..8 {
        sum += (destination[i] as i32 - source[i] as i32).abs() as u64;
    }
    write_mmx_reg64(r, sum);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FF6_reg(r1: i32, r2: i32) { instr_0FF6(read_mmx64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0FF6_mem(addr: i32, r: i32) {
    instr_0FF6(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF6(source: reg128, r: i32) {
    // psadbw xmm, xmm/m128
    // XXX: Aligned access or #gp
    // Two independent 8-byte SAD sums: bytes 0-7 go to the low qword,
    // bytes 8-15 to the high qword (each with its upper 48 bits zeroed).
    let destination = read_xmm128s(r);
    let mut sum0 = 0;
    let mut sum1 = 0;
    for i in 0..8 {
        sum0 += (destination.u8_0[i + 0] as i32 - source.u8_0[i + 0] as i32).abs() as u32;
        sum1 += (destination.u8_0[i + 8] as i32 - source.u8_0[i + 8] as i32).abs() as u32;
    }
    write_xmm128(r, sum0 as i32, 0, sum1 as i32, 0);
}
#[no_mangle]
pub unsafe fn instr_660FF6_reg(r1: i32, r2: i32) { instr_660FF6(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660FF6_mem(addr: i32, r: i32) {
    instr_660FF6(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
// maskmovq has no memory-operand encoding: #UD.
pub unsafe fn instr_0FF7_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn maskmovq(r1: i32, r2: i32, addr: i32) {
    // maskmovq mm, mm
    // Byte-masked store: each byte of r2 is written to addr+i iff bit 7 of
    // the corresponding mask byte in r1 is set.
    let source: [u8; 8] = std::mem::transmute(read_mmx64s(r2));
    let mask: [u8; 8] = std::mem::transmute(read_mmx64s(r1));
    // Check the whole 8-byte range up front so the store is all-or-nothing;
    // the per-byte writes below can then unwrap safely. The global page_fault
    // flag reports the outcome to the caller.
    match writable_or_pagefault(addr, 8) {
        Ok(()) => *page_fault = false,
        Err(()) => {
            *page_fault = true;
            return;
        },
    }
    for i in 0..8 {
        if 0 != mask[i] & 0x80 {
            safe_write8(addr + i as i32, source[i] as i32).unwrap();
        }
    }
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0FF7_reg(r1: i32, r2: i32) {
    // The implicit destination is DS:EDI (honoring segment prefixes).
    maskmovq(
        r1,
        r2,
        return_on_pagefault!(get_seg_prefix_ds(get_reg_asize(EDI))),
    )
}
#[no_mangle]
// maskmovdqu has no memory-operand encoding: #UD.
pub unsafe fn instr_660FF7_mem(addr: i32, r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn maskmovdqu(r1: i32, r2: i32, addr: i32) {
    // maskmovdqu xmm, xmm
    // 16-byte variant of maskmovq: bytes of r2 are stored where bit 7 of the
    // matching mask byte in r1 is set.
    let source = read_xmm128s(r2);
    let mask = read_xmm128s(r1);
    // Writability is verified for the full 16 bytes before any store, making
    // the operation all-or-nothing; outcome is reported via page_fault.
    match writable_or_pagefault(addr, 16) {
        Ok(()) => *page_fault = false,
        Err(()) => {
            *page_fault = true;
            return;
        },
    }
    for i in 0..16 {
        if 0 != mask.u8_0[i] & 0x80 {
            safe_write8(addr + i as i32, source.u8_0[i] as i32).unwrap();
        }
    }
}
#[no_mangle]
pub unsafe fn instr_660FF7_reg(r1: i32, r2: i32) {
    // The implicit destination is DS:EDI (honoring segment prefixes).
    maskmovdqu(
        r1,
        r2,
        return_on_pagefault!(get_seg_prefix_ds(get_reg_asize(EDI))),
    )
}
  4494. #[no_mangle]
  4495. pub unsafe fn instr_0FF8(source: u64, r: i32) {
  4496. // psubb mm, mm/m64
  4497. let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
  4498. let source: [u8; 8] = std::mem::transmute(source);
  4499. let mut result = [0; 8];
  4500. for i in 0..8 {
  4501. result[i] = destination[i] - source[i];
  4502. }
  4503. write_mmx_reg64(r, std::mem::transmute(result));
  4504. transition_fpu_to_mmx();
  4505. }
  4506. #[no_mangle]
  4507. pub unsafe fn instr_0FF8_reg(r1: i32, r2: i32) { instr_0FF8(read_mmx64s(r1), r2); }
  4508. #[no_mangle]
  4509. pub unsafe fn instr_0FF8_mem(addr: i32, r: i32) {
  4510. instr_0FF8(return_on_pagefault!(safe_read64s(addr)), r);
  4511. }
  4512. #[no_mangle]
  4513. pub unsafe fn instr_660FF8(source: reg128, r: i32) {
  4514. // psubb xmm, xmm/m128
  4515. // XXX: Aligned access or #gp
  4516. let destination = read_xmm128s(r);
  4517. let mut result = reg128 { i8_0: [0; 16] };
  4518. for i in 0..16 {
  4519. result.u8_0[i] = destination.u8_0[i] - source.u8_0[i];
  4520. }
  4521. write_xmm_reg128(r, result);
  4522. }
  4523. #[no_mangle]
  4524. pub unsafe fn instr_660FF8_reg(r1: i32, r2: i32) { instr_660FF8(read_xmm128s(r1), r2); }
  4525. #[no_mangle]
  4526. pub unsafe fn instr_660FF8_mem(addr: i32, r: i32) {
  4527. instr_660FF8(return_on_pagefault!(safe_read128s(addr)), r);
  4528. }
  4529. #[no_mangle]
  4530. pub unsafe fn instr_0FF9(source: u64, r: i32) {
  4531. // psubw mm, mm/m64
  4532. let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
  4533. let source: [i16; 4] = std::mem::transmute(source);
  4534. let mut result = [0; 4];
  4535. for i in 0..4 {
  4536. result[i] = destination[i] - source[i]
  4537. }
  4538. write_mmx_reg64(r, std::mem::transmute(result));
  4539. transition_fpu_to_mmx();
  4540. }
  4541. #[no_mangle]
  4542. pub unsafe fn instr_0FF9_reg(r1: i32, r2: i32) { instr_0FF9(read_mmx64s(r1), r2); }
  4543. #[no_mangle]
  4544. pub unsafe fn instr_0FF9_mem(addr: i32, r: i32) {
  4545. instr_0FF9(return_on_pagefault!(safe_read64s(addr)), r);
  4546. }
  4547. #[no_mangle]
  4548. pub unsafe fn instr_660FF9(source: reg128, r: i32) {
  4549. // psubw xmm, xmm/m128
  4550. // XXX: Aligned access or #gp
  4551. let destination = read_xmm128s(r);
  4552. let mut result = reg128 { i8_0: [0; 16] };
  4553. for i in 0..8 {
  4554. result.i16_0[i] = destination.i16_0[i] - source.i16_0[i]
  4555. }
  4556. write_xmm_reg128(r, result);
  4557. }
  4558. #[no_mangle]
  4559. pub unsafe fn instr_660FF9_reg(r1: i32, r2: i32) { instr_660FF9(read_xmm128s(r1), r2); }
  4560. #[no_mangle]
  4561. pub unsafe fn instr_660FF9_mem(addr: i32, r: i32) {
  4562. instr_660FF9(return_on_pagefault!(safe_read128s(addr)), r);
  4563. }
  4564. #[no_mangle]
  4565. pub unsafe fn instr_0FFA(source: u64, r: i32) {
  4566. // psubd mm, mm/m64
  4567. let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
  4568. let source: [i32; 2] = std::mem::transmute(source);
  4569. let mut result = [0; 2];
  4570. for i in 0..2 {
  4571. result[i] = destination[i] - source[i]
  4572. }
  4573. write_mmx_reg64(r, std::mem::transmute(result));
  4574. transition_fpu_to_mmx();
  4575. }
  4576. #[no_mangle]
  4577. pub unsafe fn instr_0FFA_reg(r1: i32, r2: i32) { instr_0FFA(read_mmx64s(r1), r2); }
  4578. #[no_mangle]
  4579. pub unsafe fn instr_0FFA_mem(addr: i32, r: i32) {
  4580. instr_0FFA(return_on_pagefault!(safe_read64s(addr)), r);
  4581. }
  4582. #[no_mangle]
  4583. pub unsafe fn instr_660FFA(source: reg128, r: i32) {
  4584. // psubd xmm, xmm/m128
  4585. // XXX: Aligned access or #gp
  4586. let destination = read_xmm128s(r);
  4587. write_xmm128(
  4588. r,
  4589. destination.u32_0[0].wrapping_sub(source.u32_0[0]) as i32,
  4590. destination.u32_0[1].wrapping_sub(source.u32_0[1]) as i32,
  4591. destination.u32_0[2].wrapping_sub(source.u32_0[2]) as i32,
  4592. destination.u32_0[3].wrapping_sub(source.u32_0[3]) as i32,
  4593. );
  4594. }
  4595. #[no_mangle]
  4596. pub unsafe fn instr_660FFA_reg(r1: i32, r2: i32) { instr_660FFA(read_xmm128s(r1), r2); }
  4597. #[no_mangle]
  4598. pub unsafe fn instr_660FFA_mem(addr: i32, r: i32) {
  4599. instr_660FFA(return_on_pagefault!(safe_read128s(addr)), r);
  4600. }
  4601. #[no_mangle]
  4602. pub unsafe fn instr_0FFB(source: u64, r: i32) {
  4603. // psubq mm, mm/m64
  4604. write_mmx_reg64(r, read_mmx64s(r).wrapping_sub(source));
  4605. transition_fpu_to_mmx();
  4606. }
  4607. #[no_mangle]
  4608. pub unsafe fn instr_0FFB_reg(r1: i32, r2: i32) { instr_0FFB(read_mmx64s(r1), r2); }
  4609. #[no_mangle]
  4610. pub unsafe fn instr_0FFB_mem(addr: i32, r: i32) {
  4611. instr_0FFB(return_on_pagefault!(safe_read64s(addr)), r);
  4612. }
  4613. #[no_mangle]
  4614. pub unsafe fn instr_660FFB(source: reg128, r: i32) {
  4615. // psubq xmm, xmm/m128
  4616. // XXX: Aligned access or #gp
  4617. let mut destination = read_xmm128s(r);
  4618. destination.u64_0[0] = destination.u64_0[0].wrapping_sub(source.u64_0[0]);
  4619. destination.u64_0[1] = destination.u64_0[1].wrapping_sub(source.u64_0[1]);
  4620. write_xmm_reg128(r, destination);
  4621. }
  4622. #[no_mangle]
  4623. pub unsafe fn instr_660FFB_reg(r1: i32, r2: i32) { instr_660FFB(read_xmm128s(r1), r2); }
  4624. #[no_mangle]
  4625. pub unsafe fn instr_660FFB_mem(addr: i32, r: i32) {
  4626. instr_660FFB(return_on_pagefault!(safe_read128s(addr)), r);
  4627. }
  4628. #[no_mangle]
  4629. pub unsafe fn instr_0FFC(source: u64, r: i32) {
  4630. // paddb mm, mm/m64
  4631. let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
  4632. let source: [u8; 8] = std::mem::transmute(source);
  4633. let mut result = [0; 8];
  4634. for i in 0..8 {
  4635. result[i] = destination[i] + source[i];
  4636. }
  4637. write_mmx_reg64(r, std::mem::transmute(result));
  4638. transition_fpu_to_mmx();
  4639. }
  4640. #[no_mangle]
  4641. pub unsafe fn instr_0FFC_reg(r1: i32, r2: i32) { instr_0FFC(read_mmx64s(r1), r2); }
  4642. #[no_mangle]
  4643. pub unsafe fn instr_0FFC_mem(addr: i32, r: i32) {
  4644. instr_0FFC(return_on_pagefault!(safe_read64s(addr)), r);
  4645. }
  4646. #[no_mangle]
  4647. pub unsafe fn instr_660FFC(source: reg128, r: i32) {
  4648. // paddb xmm, xmm/m128
  4649. // XXX: Aligned access or #gp
  4650. let destination = read_xmm128s(r);
  4651. let mut result = reg128 { i8_0: [0; 16] };
  4652. for i in 0..16 {
  4653. result.u8_0[i] = destination.u8_0[i] + source.u8_0[i];
  4654. }
  4655. write_xmm_reg128(r, result);
  4656. }
  4657. #[no_mangle]
  4658. pub unsafe fn instr_660FFC_reg(r1: i32, r2: i32) { instr_660FFC(read_xmm128s(r1), r2); }
  4659. #[no_mangle]
  4660. pub unsafe fn instr_660FFC_mem(addr: i32, r: i32) {
  4661. instr_660FFC(return_on_pagefault!(safe_read128s(addr)), r);
  4662. }
  4663. #[no_mangle]
  4664. pub unsafe fn instr_0FFD(source: u64, r: i32) {
  4665. // paddw mm, mm/m64
  4666. let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
  4667. let source: [u16; 4] = std::mem::transmute(source);
  4668. let mut result = [0; 4];
  4669. for i in 0..4 {
  4670. result[i] = destination[i] + source[i]
  4671. }
  4672. write_mmx_reg64(r, std::mem::transmute(result));
  4673. transition_fpu_to_mmx();
  4674. }
  4675. #[no_mangle]
  4676. pub unsafe fn instr_0FFD_reg(r1: i32, r2: i32) { instr_0FFD(read_mmx64s(r1), r2); }
  4677. #[no_mangle]
  4678. pub unsafe fn instr_0FFD_mem(addr: i32, r: i32) {
  4679. instr_0FFD(return_on_pagefault!(safe_read64s(addr)), r);
  4680. }
  4681. #[no_mangle]
  4682. pub unsafe fn instr_660FFD(source: reg128, r: i32) {
  4683. // paddw xmm, xmm/m128
  4684. // XXX: Aligned access or #gp
  4685. let destination = read_xmm128s(r);
  4686. let mut result = reg128 { i8_0: [0; 16] };
  4687. for i in 0..8 {
  4688. result.u16_0[i] = (destination.u16_0[i] as i32 + source.u16_0[i] as i32 & 0xFFFF) as u16;
  4689. }
  4690. write_xmm_reg128(r, result);
  4691. }
  4692. #[no_mangle]
  4693. pub unsafe fn instr_660FFD_reg(r1: i32, r2: i32) { instr_660FFD(read_xmm128s(r1), r2); }
  4694. #[no_mangle]
  4695. pub unsafe fn instr_660FFD_mem(addr: i32, r: i32) {
  4696. instr_660FFD(return_on_pagefault!(safe_read128s(addr)), r);
  4697. }
  4698. #[no_mangle]
  4699. pub unsafe fn instr_0FFE(source: u64, r: i32) {
  4700. // paddd mm, mm/m64
  4701. let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
  4702. let source: [i32; 2] = std::mem::transmute(source);
  4703. let mut result = [0; 2];
  4704. for i in 0..2 {
  4705. result[i] = destination[i] + source[i]
  4706. }
  4707. write_mmx_reg64(r, std::mem::transmute(result));
  4708. transition_fpu_to_mmx();
  4709. }
  4710. #[no_mangle]
  4711. pub unsafe fn instr_0FFE_reg(r1: i32, r2: i32) { instr_0FFE(read_mmx64s(r1), r2); }
  4712. #[no_mangle]
  4713. pub unsafe fn instr_0FFE_mem(addr: i32, r: i32) {
  4714. instr_0FFE(return_on_pagefault!(safe_read64s(addr)), r);
  4715. }
  4716. #[no_mangle]
  4717. pub unsafe fn instr_660FFE(source: reg128, r: i32) {
  4718. // paddd xmm, xmm/m128
  4719. // XXX: Aligned access or #gp
  4720. let destination = read_xmm128s(r);
  4721. let dword0 = destination.u32_0[0].wrapping_add(source.u32_0[0]) as i32;
  4722. let dword1 = destination.u32_0[1].wrapping_add(source.u32_0[1]) as i32;
  4723. let dword2 = destination.u32_0[2].wrapping_add(source.u32_0[2]) as i32;
  4724. let dword3 = destination.u32_0[3].wrapping_add(source.u32_0[3]) as i32;
  4725. write_xmm128(r, dword0, dword1, dword2, dword3);
  4726. }
  4727. #[no_mangle]
  4728. pub unsafe fn instr_660FFE_reg(r1: i32, r2: i32) { instr_660FFE(read_xmm128s(r1), r2); }
  4729. #[no_mangle]
  4730. pub unsafe fn instr_660FFE_mem(addr: i32, r: i32) {
  4731. instr_660FFE(return_on_pagefault!(safe_read128s(addr)), r);
  4732. }
#[no_mangle]
pub unsafe fn instr_0FFF() {
    // 0F FF has no valid encoding: log and raise #UD.
    // Windows 98
    // NOTE(review): the "Windows 98" comment presumably means this opcode was
    // observed being executed by Win98 — confirm against project history.
    dbg_log!("#ud: 0F FF");
    trigger_ud();
}
// F3 0F 16 is not implemented; both forms report via unimplemented_sse().
#[no_mangle]
pub unsafe fn instr_F30F16_reg(r1: i32, r2: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_F30F16_mem(addr: i32, r: i32) { unimplemented_sse(); }
// 0F 19 and 0F 1C..0F 1E: decoded but perform no operation; operands are
// ignored. NOTE(review): presumably the reserved hint-NOP opcode space —
// confirm against the opcode tables.
#[no_mangle]
pub unsafe fn instr_0F19_reg(r1: i32, r2: i32) {}
#[no_mangle]
pub unsafe fn instr_0F19_mem(addr: i32, r: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1C_reg(r1: i32, r2: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1C_mem(addr: i32, r: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1D_reg(r1: i32, r2: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1D_mem(addr: i32, r: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1E_reg(r1: i32, r2: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1E_mem(addr: i32, r: i32) {}
  4759. #[no_mangle]
  4760. pub unsafe fn instr_0F2A(source: u64, r: i32) {
  4761. // cvtpi2ps xmm, mm/m64
  4762. // Note: Casts here can fail
  4763. // XXX: Should round according to round control
  4764. let source: [i32; 2] = std::mem::transmute(source);
  4765. let result = [source[0] as f32, source[1] as f32];
  4766. write_xmm64(r, std::mem::transmute(result));
  4767. transition_fpu_to_mmx();
  4768. }
  4769. #[no_mangle]
  4770. pub unsafe fn instr_0F2A_reg(r1: i32, r2: i32) { instr_0F2A(read_mmx64s(r1), r2); }
  4771. #[no_mangle]
  4772. pub unsafe fn instr_0F2A_mem(addr: i32, r: i32) {
  4773. instr_0F2A(return_on_pagefault!(safe_read64s(addr)), r);
  4774. }
  4775. #[no_mangle]
  4776. pub unsafe fn instr_660F2A(source: u64, r: i32) {
  4777. // cvtpi2pd xmm, xmm/m64
  4778. // These casts can't fail
  4779. let source: [i32; 2] = std::mem::transmute(source);
  4780. let result = reg128 {
  4781. f64_0: [source[0] as f64, source[1] as f64],
  4782. };
  4783. write_xmm_reg128(r, result);
  4784. transition_fpu_to_mmx();
  4785. }
  4786. #[no_mangle]
  4787. pub unsafe fn instr_660F2A_reg(r1: i32, r2: i32) { instr_660F2A(read_mmx64s(r1), r2); }
  4788. #[no_mangle]
  4789. pub unsafe fn instr_660F2A_mem(addr: i32, r: i32) {
  4790. instr_660F2A(return_on_pagefault!(safe_read64s(addr)), r);
  4791. }
  4792. #[no_mangle]
  4793. pub unsafe fn instr_F20F2A(source: i32, r: i32) {
  4794. // cvtsi2sd xmm, r32/m32
  4795. // This cast can't fail
  4796. write_xmm_f64(r, source as f64);
  4797. }
  4798. #[no_mangle]
  4799. pub unsafe fn instr_F20F2A_reg(r1: i32, r2: i32) { instr_F20F2A(read_reg32(r1), r2); }
  4800. #[no_mangle]
  4801. pub unsafe fn instr_F20F2A_mem(addr: i32, r: i32) {
  4802. instr_F20F2A(return_on_pagefault!(safe_read32s(addr)), r);
  4803. }
  4804. #[no_mangle]
  4805. pub unsafe fn instr_F30F2A(source: i32, r: i32) {
  4806. // cvtsi2ss xmm, r/m32
  4807. // Note: This cast can fail
  4808. // XXX: Should round according to round control
  4809. let result = source as f32;
  4810. write_xmm_f32(r, result);
  4811. }
  4812. #[no_mangle]
  4813. pub unsafe fn instr_F30F2A_reg(r1: i32, r2: i32) { instr_F30F2A(read_reg32(r1), r2); }
  4814. #[no_mangle]
  4815. pub unsafe fn instr_F30F2A_mem(addr: i32, r: i32) {
  4816. instr_F30F2A(return_on_pagefault!(safe_read32s(addr)), r);
  4817. }
  4818. #[no_mangle]
  4819. pub unsafe fn instr_0F2D(source: u64, r: i32) {
  4820. // cvtps2pi mm, xmm/m64
  4821. let source: [f32; 2] = std::mem::transmute(source);
  4822. let result = [
  4823. sse_convert_f32_to_i32(source[0]),
  4824. sse_convert_f32_to_i32(source[1]),
  4825. ];
  4826. write_mmx_reg64(r, std::mem::transmute(result));
  4827. transition_fpu_to_mmx();
  4828. }
  4829. #[no_mangle]
  4830. pub unsafe fn instr_0F2D_reg(r1: i32, r2: i32) { instr_0F2D(read_xmm64s(r1), r2); }
  4831. #[no_mangle]
  4832. pub unsafe fn instr_0F2D_mem(addr: i32, r: i32) {
  4833. instr_0F2D(return_on_pagefault!(safe_read64s(addr)), r);
  4834. }
  4835. #[no_mangle]
  4836. pub unsafe fn instr_660F2D(source: reg128, r: i32) {
  4837. // cvtpd2pi mm, xmm/m128
  4838. let result = [
  4839. sse_convert_f64_to_i32(source.f64_0[0]),
  4840. sse_convert_f64_to_i32(source.f64_0[1]),
  4841. ];
  4842. write_mmx_reg64(r, std::mem::transmute(result));
  4843. transition_fpu_to_mmx();
  4844. }
  4845. #[no_mangle]
  4846. pub unsafe fn instr_660F2D_reg(r1: i32, r2: i32) { instr_660F2D(read_xmm128s(r1), r2); }
  4847. #[no_mangle]
  4848. pub unsafe fn instr_660F2D_mem(addr: i32, r: i32) {
  4849. instr_660F2D(return_on_pagefault!(safe_read128s(addr)), r);
  4850. }
  4851. #[no_mangle]
  4852. pub unsafe fn instr_F20F2D(source: u64, r: i32) {
  4853. // cvtsd2si r32, xmm/m64
  4854. write_reg32(r, sse_convert_f64_to_i32(f64::from_bits(source)));
  4855. }
  4856. #[no_mangle]
  4857. pub unsafe fn instr_F20F2D_reg(r1: i32, r2: i32) { instr_F20F2D(read_xmm64s(r1), r2); }
  4858. #[no_mangle]
  4859. pub unsafe fn instr_F20F2D_mem(addr: i32, r: i32) {
  4860. instr_F20F2D(return_on_pagefault!(safe_read64s(addr)), r);
  4861. }
  4862. #[no_mangle]
  4863. pub unsafe fn instr_F30F2D(source: f32, r: i32) {
  4864. // cvtss2si r32, xmm1/m32
  4865. write_reg32(r, sse_convert_f32_to_i32(source));
  4866. }
  4867. #[no_mangle]
  4868. pub unsafe fn instr_F30F2D_reg(r1: i32, r2: i32) { instr_F30F2D(read_xmm_f32(r1), r2); }
  4869. #[no_mangle]
  4870. pub unsafe fn instr_F30F2D_mem(addr: i32, r: i32) {
  4871. instr_F30F2D(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  4872. }
  4873. #[no_mangle]
  4874. pub unsafe fn instr_0F51(source: reg128, r: i32) {
  4875. // sqrtps xmm, xmm/mem128
  4876. // XXX: Should round according to round control
  4877. let result = reg128 {
  4878. f32_0: [
  4879. source.f32_0[0].sqrt(),
  4880. source.f32_0[1].sqrt(),
  4881. source.f32_0[2].sqrt(),
  4882. source.f32_0[3].sqrt(),
  4883. ],
  4884. };
  4885. write_xmm_reg128(r, result);
  4886. }
  4887. #[no_mangle]
  4888. pub unsafe fn instr_0F51_reg(r1: i32, r2: i32) { instr_0F51(read_xmm128s(r1), r2); }
  4889. #[no_mangle]
  4890. pub unsafe fn instr_0F51_mem(addr: i32, r: i32) {
  4891. instr_0F51(return_on_pagefault!(safe_read128s(addr)), r);
  4892. }
  4893. #[no_mangle]
  4894. pub unsafe fn instr_660F51(source: reg128, r: i32) {
  4895. // sqrtpd xmm, xmm/mem128
  4896. // XXX: Should round according to round control
  4897. let result = reg128 {
  4898. f64_0: [source.f64_0[0].sqrt(), source.f64_0[1].sqrt()],
  4899. };
  4900. write_xmm_reg128(r, result);
  4901. }
  4902. #[no_mangle]
  4903. pub unsafe fn instr_660F51_reg(r1: i32, r2: i32) { instr_660F51(read_xmm128s(r1), r2); }
  4904. #[no_mangle]
  4905. pub unsafe fn instr_660F51_mem(addr: i32, r: i32) {
  4906. instr_660F51(return_on_pagefault!(safe_read128s(addr)), r);
  4907. }
  4908. #[no_mangle]
  4909. pub unsafe fn instr_F20F51(source: u64, r: i32) {
  4910. // sqrtsd xmm, xmm/mem64
  4911. // XXX: Should round according to round control
  4912. write_xmm_f64(r, f64::from_bits(source).sqrt());
  4913. }
  4914. #[no_mangle]
  4915. pub unsafe fn instr_F20F51_reg(r1: i32, r2: i32) { instr_F20F51(read_xmm64s(r1), r2); }
  4916. #[no_mangle]
  4917. pub unsafe fn instr_F20F51_mem(addr: i32, r: i32) {
  4918. instr_F20F51(return_on_pagefault!(safe_read64s(addr)), r);
  4919. }
  4920. #[no_mangle]
  4921. pub unsafe fn instr_F30F51(source: f32, r: i32) {
  4922. // sqrtss xmm, xmm/mem32
  4923. // XXX: Should round according to round control
  4924. write_xmm_f32(r, source.sqrt());
  4925. }
  4926. #[no_mangle]
  4927. pub unsafe fn instr_F30F51_reg(r1: i32, r2: i32) { instr_F30F51(read_xmm_f32(r1), r2); }
  4928. #[no_mangle]
  4929. pub unsafe fn instr_F30F51_mem(addr: i32, r: i32) {
  4930. instr_F30F51(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  4931. }
  4932. #[no_mangle]
  4933. pub unsafe fn instr_0F52(source: reg128, r: i32) {
  4934. // rcpps xmm1, xmm2/m128
  4935. let result = reg128 {
  4936. f32_0: [
  4937. 1.0 / source.f32_0[0].sqrt(),
  4938. 1.0 / source.f32_0[1].sqrt(),
  4939. 1.0 / source.f32_0[2].sqrt(),
  4940. 1.0 / source.f32_0[3].sqrt(),
  4941. ],
  4942. };
  4943. write_xmm_reg128(r, result);
  4944. }
  4945. #[no_mangle]
  4946. pub unsafe fn instr_0F52_reg(r1: i32, r2: i32) { instr_0F52(read_xmm128s(r1), r2); }
  4947. #[no_mangle]
  4948. pub unsafe fn instr_0F52_mem(addr: i32, r: i32) {
  4949. instr_0F52(return_on_pagefault!(safe_read128s(addr)), r);
  4950. }
  4951. #[no_mangle]
  4952. pub unsafe fn instr_F30F52(source: f32, r: i32) {
  4953. // rsqrtss xmm1, xmm2/m32
  4954. write_xmm_f32(r, 1.0 / source.sqrt());
  4955. }
  4956. #[no_mangle]
  4957. pub unsafe fn instr_F30F52_reg(r1: i32, r2: i32) { instr_F30F52(read_xmm_f32(r1), r2); }
  4958. #[no_mangle]
  4959. pub unsafe fn instr_F30F52_mem(addr: i32, r: i32) {
  4960. instr_F30F52(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  4961. }
  4962. #[no_mangle]
  4963. pub unsafe fn instr_0F53(source: reg128, r: i32) {
  4964. // rcpps xmm, xmm/m128
  4965. let result = reg128 {
  4966. f32_0: [
  4967. 1.0 / source.f32_0[0],
  4968. 1.0 / source.f32_0[1],
  4969. 1.0 / source.f32_0[2],
  4970. 1.0 / source.f32_0[3],
  4971. ],
  4972. };
  4973. write_xmm_reg128(r, result);
  4974. }
  4975. #[no_mangle]
  4976. pub unsafe fn instr_0F53_reg(r1: i32, r2: i32) { instr_0F53(read_xmm128s(r1), r2); }
  4977. #[no_mangle]
  4978. pub unsafe fn instr_0F53_mem(addr: i32, r: i32) {
  4979. instr_0F53(return_on_pagefault!(safe_read128s(addr)), r);
  4980. }
  4981. #[no_mangle]
  4982. pub unsafe fn instr_F30F53(source: f32, r: i32) {
  4983. // rcpss xmm, xmm/m32
  4984. write_xmm_f32(r, 1.0 / source);
  4985. }
  4986. #[no_mangle]
  4987. pub unsafe fn instr_F30F53_reg(r1: i32, r2: i32) { instr_F30F53(read_xmm_f32(r1), r2); }
  4988. #[no_mangle]
  4989. pub unsafe fn instr_F30F53_mem(addr: i32, r: i32) {
  4990. instr_F30F53(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  4991. }
  4992. #[no_mangle]
  4993. pub unsafe fn instr_0F58(source: reg128, r: i32) {
  4994. // addps xmm, xmm/mem128
  4995. let destination = read_xmm128s(r);
  4996. let result = reg128 {
  4997. f32_0: [
  4998. source.f32_0[0] + destination.f32_0[0],
  4999. source.f32_0[1] + destination.f32_0[1],
  5000. source.f32_0[2] + destination.f32_0[2],
  5001. source.f32_0[3] + destination.f32_0[3],
  5002. ],
  5003. };
  5004. write_xmm_reg128(r, result);
  5005. }
  5006. #[no_mangle]
  5007. pub unsafe fn instr_0F58_reg(r1: i32, r2: i32) { instr_0F58(read_xmm128s(r1), r2); }
  5008. #[no_mangle]
  5009. pub unsafe fn instr_0F58_mem(addr: i32, r: i32) {
  5010. instr_0F58(return_on_pagefault!(safe_read128s(addr)), r);
  5011. }
  5012. #[no_mangle]
  5013. pub unsafe fn instr_660F58(source: reg128, r: i32) {
  5014. // addpd xmm, xmm/mem128
  5015. let destination = read_xmm128s(r);
  5016. let result = reg128 {
  5017. f64_0: [
  5018. source.f64_0[0] + destination.f64_0[0],
  5019. source.f64_0[1] + destination.f64_0[1],
  5020. ],
  5021. };
  5022. write_xmm_reg128(r, result);
  5023. }
  5024. #[no_mangle]
  5025. pub unsafe fn instr_660F58_reg(r1: i32, r2: i32) { instr_660F58(read_xmm128s(r1), r2); }
  5026. #[no_mangle]
  5027. pub unsafe fn instr_660F58_mem(addr: i32, r: i32) {
  5028. instr_660F58(return_on_pagefault!(safe_read128s(addr)), r);
  5029. }
  5030. #[no_mangle]
  5031. pub unsafe fn instr_F20F58(source: u64, r: i32) {
  5032. // addsd xmm, xmm/mem64
  5033. let destination = read_xmm64s(r);
  5034. write_xmm_f64(r, f64::from_bits(source) + f64::from_bits(destination));
  5035. }
  5036. #[no_mangle]
  5037. pub unsafe fn instr_F20F58_reg(r1: i32, r2: i32) { instr_F20F58(read_xmm64s(r1), r2); }
  5038. #[no_mangle]
  5039. pub unsafe fn instr_F20F58_mem(addr: i32, r: i32) {
  5040. instr_F20F58(return_on_pagefault!(safe_read64s(addr)), r);
  5041. }
  5042. #[no_mangle]
  5043. pub unsafe fn instr_F30F58(source: f32, r: i32) {
  5044. // addss xmm, xmm/mem32
  5045. let destination = read_xmm_f32(r);
  5046. let result = source + destination;
  5047. write_xmm_f32(r, result);
  5048. }
  5049. #[no_mangle]
  5050. pub unsafe fn instr_F30F58_reg(r1: i32, r2: i32) { instr_F30F58(read_xmm_f32(r1), r2); }
  5051. #[no_mangle]
  5052. pub unsafe fn instr_F30F58_mem(addr: i32, r: i32) {
  5053. instr_F30F58(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5054. }
  5055. #[no_mangle]
  5056. pub unsafe fn instr_0F59(source: reg128, r: i32) {
  5057. // mulps xmm, xmm/mem128
  5058. let destination = read_xmm128s(r);
  5059. let result = reg128 {
  5060. f32_0: [
  5061. source.f32_0[0] * destination.f32_0[0],
  5062. source.f32_0[1] * destination.f32_0[1],
  5063. source.f32_0[2] * destination.f32_0[2],
  5064. source.f32_0[3] * destination.f32_0[3],
  5065. ],
  5066. };
  5067. write_xmm_reg128(r, result);
  5068. }
  5069. #[no_mangle]
  5070. pub unsafe fn instr_0F59_reg(r1: i32, r2: i32) { instr_0F59(read_xmm128s(r1), r2); }
  5071. #[no_mangle]
  5072. pub unsafe fn instr_0F59_mem(addr: i32, r: i32) {
  5073. instr_0F59(return_on_pagefault!(safe_read128s(addr)), r);
  5074. }
  5075. #[no_mangle]
  5076. pub unsafe fn instr_660F59(source: reg128, r: i32) {
  5077. // mulpd xmm, xmm/mem128
  5078. let destination = read_xmm128s(r);
  5079. let result = reg128 {
  5080. f64_0: [
  5081. source.f64_0[0] * destination.f64_0[0],
  5082. source.f64_0[1] * destination.f64_0[1],
  5083. ],
  5084. };
  5085. write_xmm_reg128(r, result);
  5086. }
  5087. #[no_mangle]
  5088. pub unsafe fn instr_660F59_reg(r1: i32, r2: i32) { instr_660F59(read_xmm128s(r1), r2); }
  5089. #[no_mangle]
  5090. pub unsafe fn instr_660F59_mem(addr: i32, r: i32) {
  5091. instr_660F59(return_on_pagefault!(safe_read128s(addr)), r);
  5092. }
  5093. #[no_mangle]
  5094. pub unsafe fn instr_F20F59(source: u64, r: i32) {
  5095. // mulsd xmm, xmm/mem64
  5096. let destination = read_xmm64s(r);
  5097. write_xmm_f64(r, f64::from_bits(source) * f64::from_bits(destination));
  5098. }
  5099. #[no_mangle]
  5100. pub unsafe fn instr_F20F59_reg(r1: i32, r2: i32) { instr_F20F59(read_xmm64s(r1), r2); }
  5101. #[no_mangle]
  5102. pub unsafe fn instr_F20F59_mem(addr: i32, r: i32) {
  5103. instr_F20F59(return_on_pagefault!(safe_read64s(addr)), r);
  5104. }
  5105. #[no_mangle]
  5106. pub unsafe fn instr_F30F59(source: f32, r: i32) {
  5107. // mulss xmm, xmm/mem32
  5108. let destination = read_xmm_f32(r);
  5109. let result = source * destination;
  5110. write_xmm_f32(r, result);
  5111. }
  5112. #[no_mangle]
  5113. pub unsafe fn instr_F30F59_reg(r1: i32, r2: i32) { instr_F30F59(read_xmm_f32(r1), r2); }
  5114. #[no_mangle]
  5115. pub unsafe fn instr_F30F59_mem(addr: i32, r: i32) {
  5116. instr_F30F59(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5117. }
  5118. #[no_mangle]
  5119. pub unsafe fn instr_0F5A(source: u64, r: i32) {
  5120. // cvtps2pd xmm1, xmm2/m64
  5121. let source: [f32; 2] = std::mem::transmute(source);
  5122. let result = reg128 {
  5123. f64_0: [source[0] as f64, source[1] as f64],
  5124. };
  5125. write_xmm_reg128(r, result);
  5126. }
  5127. #[no_mangle]
  5128. pub unsafe fn instr_0F5A_reg(r1: i32, r2: i32) { instr_0F5A(read_xmm64s(r1), r2); }
  5129. #[no_mangle]
  5130. pub unsafe fn instr_0F5A_mem(addr: i32, r: i32) {
  5131. instr_0F5A(return_on_pagefault!(safe_read64s(addr)), r);
  5132. }
  5133. #[no_mangle]
  5134. pub unsafe fn instr_660F5A(source: reg128, r: i32) {
  5135. // cvtpd2ps xmm1, xmm2/m128
  5136. let result = reg128 {
  5137. // XXX: These conversions are lossy and should round according to the round control
  5138. f32_0: [source.f64_0[0] as f32, source.f64_0[1] as f32, 0., 0.],
  5139. };
  5140. write_xmm_reg128(r, result);
  5141. }
  5142. #[no_mangle]
  5143. pub unsafe fn instr_660F5A_reg(r1: i32, r2: i32) { instr_660F5A(read_xmm128s(r1), r2); }
  5144. #[no_mangle]
  5145. pub unsafe fn instr_660F5A_mem(addr: i32, r: i32) {
  5146. instr_660F5A(return_on_pagefault!(safe_read128s(addr)), r);
  5147. }
  5148. #[no_mangle]
  5149. pub unsafe fn instr_F20F5A(source: u64, r: i32) {
  5150. // cvtsd2ss xmm1, xmm2/m64
  5151. // XXX: This conversions is lossy and should round according to the round control
  5152. write_xmm_f32(r, f64::from_bits(source) as f32);
  5153. }
  5154. #[no_mangle]
  5155. pub unsafe fn instr_F20F5A_reg(r1: i32, r2: i32) { instr_F20F5A(read_xmm64s(r1), r2); }
  5156. #[no_mangle]
  5157. pub unsafe fn instr_F20F5A_mem(addr: i32, r: i32) {
  5158. instr_F20F5A(return_on_pagefault!(safe_read64s(addr)), r);
  5159. }
  5160. #[no_mangle]
  5161. pub unsafe fn instr_F30F5A(source: f32, r: i32) {
  5162. // cvtss2sd xmm1, xmm2/m32
  5163. write_xmm_f64(r, source as f64);
  5164. }
  5165. #[no_mangle]
  5166. pub unsafe fn instr_F30F5A_reg(r1: i32, r2: i32) { instr_F30F5A(read_xmm_f32(r1), r2); }
  5167. #[no_mangle]
  5168. pub unsafe fn instr_F30F5A_mem(addr: i32, r: i32) {
  5169. instr_F30F5A(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5170. }
  5171. #[no_mangle]
  5172. pub unsafe fn instr_0F5B(source: reg128, r: i32) {
  5173. // cvtdq2ps xmm1, xmm2/m128
  5174. // XXX: Should round according to round control
  5175. let result = reg128 {
  5176. f32_0: [
  5177. // XXX: Precision exception
  5178. source.i32_0[0] as f32,
  5179. source.i32_0[1] as f32,
  5180. source.i32_0[2] as f32,
  5181. source.i32_0[3] as f32,
  5182. ],
  5183. };
  5184. write_xmm_reg128(r, result);
  5185. }
  5186. #[no_mangle]
  5187. pub unsafe fn instr_0F5B_reg(r1: i32, r2: i32) { instr_0F5B(read_xmm128s(r1), r2); }
  5188. #[no_mangle]
  5189. pub unsafe fn instr_0F5B_mem(addr: i32, r: i32) {
  5190. instr_0F5B(return_on_pagefault!(safe_read128s(addr)), r);
  5191. }
  5192. #[no_mangle]
  5193. pub unsafe fn instr_660F5B(source: reg128, r: i32) {
  5194. // cvtps2dq xmm1, xmm2/m128
  5195. let result = reg128 {
  5196. i32_0: [
  5197. // XXX: Precision exception
  5198. sse_convert_f32_to_i32(source.f32_0[0]),
  5199. sse_convert_f32_to_i32(source.f32_0[1]),
  5200. sse_convert_f32_to_i32(source.f32_0[2]),
  5201. sse_convert_f32_to_i32(source.f32_0[3]),
  5202. ],
  5203. };
  5204. write_xmm_reg128(r, result);
  5205. }
  5206. #[no_mangle]
  5207. pub unsafe fn instr_660F5B_reg(r1: i32, r2: i32) { instr_660F5B(read_xmm128s(r1), r2); }
  5208. #[no_mangle]
  5209. pub unsafe fn instr_660F5B_mem(addr: i32, r: i32) {
  5210. instr_660F5B(return_on_pagefault!(safe_read128s(addr)), r);
  5211. }
  5212. #[no_mangle]
  5213. pub unsafe fn instr_F30F5B(source: reg128, r: i32) {
  5214. // cvttps2dq xmm1, xmm2/m128
  5215. let result = reg128 {
  5216. i32_0: [
  5217. sse_convert_with_truncation_f32_to_i32(source.f32_0[0]),
  5218. sse_convert_with_truncation_f32_to_i32(source.f32_0[1]),
  5219. sse_convert_with_truncation_f32_to_i32(source.f32_0[2]),
  5220. sse_convert_with_truncation_f32_to_i32(source.f32_0[3]),
  5221. ],
  5222. };
  5223. write_xmm_reg128(r, result);
  5224. }
  5225. #[no_mangle]
  5226. pub unsafe fn instr_F30F5B_reg(r1: i32, r2: i32) { instr_F30F5B(read_xmm128s(r1), r2); }
  5227. #[no_mangle]
  5228. pub unsafe fn instr_F30F5B_mem(addr: i32, r: i32) {
  5229. instr_F30F5B(return_on_pagefault!(safe_read128s(addr)), r);
  5230. }
  5231. #[no_mangle]
  5232. pub unsafe fn instr_0F5C(source: reg128, r: i32) {
  5233. // subps xmm, xmm/mem128
  5234. let destination = read_xmm128s(r);
  5235. let result = reg128 {
  5236. f32_0: [
  5237. destination.f32_0[0] - source.f32_0[0],
  5238. destination.f32_0[1] - source.f32_0[1],
  5239. destination.f32_0[2] - source.f32_0[2],
  5240. destination.f32_0[3] - source.f32_0[3],
  5241. ],
  5242. };
  5243. write_xmm_reg128(r, result);
  5244. }
  5245. #[no_mangle]
  5246. pub unsafe fn instr_0F5C_reg(r1: i32, r2: i32) { instr_0F5C(read_xmm128s(r1), r2); }
  5247. #[no_mangle]
  5248. pub unsafe fn instr_0F5C_mem(addr: i32, r: i32) {
  5249. instr_0F5C(return_on_pagefault!(safe_read128s(addr)), r);
  5250. }
  5251. #[no_mangle]
  5252. pub unsafe fn instr_660F5C(source: reg128, r: i32) {
  5253. // subpd xmm, xmm/mem128
  5254. let destination = read_xmm128s(r);
  5255. let result = reg128 {
  5256. f64_0: [
  5257. destination.f64_0[0] - source.f64_0[0],
  5258. destination.f64_0[1] - source.f64_0[1],
  5259. ],
  5260. };
  5261. write_xmm_reg128(r, result);
  5262. }
  5263. #[no_mangle]
  5264. pub unsafe fn instr_660F5C_reg(r1: i32, r2: i32) { instr_660F5C(read_xmm128s(r1), r2); }
  5265. #[no_mangle]
  5266. pub unsafe fn instr_660F5C_mem(addr: i32, r: i32) {
  5267. instr_660F5C(return_on_pagefault!(safe_read128s(addr)), r);
  5268. }
  5269. #[no_mangle]
  5270. pub unsafe fn instr_F20F5C(source: u64, r: i32) {
  5271. // subsd xmm, xmm/mem64
  5272. let destination = read_xmm64s(r);
  5273. write_xmm_f64(r, f64::from_bits(destination) - f64::from_bits(source));
  5274. }
  5275. #[no_mangle]
  5276. pub unsafe fn instr_F20F5C_reg(r1: i32, r2: i32) { instr_F20F5C(read_xmm64s(r1), r2); }
  5277. #[no_mangle]
  5278. pub unsafe fn instr_F20F5C_mem(addr: i32, r: i32) {
  5279. instr_F20F5C(return_on_pagefault!(safe_read64s(addr)), r);
  5280. }
  5281. #[no_mangle]
  5282. pub unsafe fn instr_F30F5C(source: f32, r: i32) {
  5283. // subss xmm, xmm/mem32
  5284. let destination = read_xmm_f32(r);
  5285. let result = destination - source;
  5286. write_xmm_f32(r, result);
  5287. }
  5288. #[no_mangle]
  5289. pub unsafe fn instr_F30F5C_reg(r1: i32, r2: i32) { instr_F30F5C(read_xmm_f32(r1), r2); }
  5290. #[no_mangle]
  5291. pub unsafe fn instr_F30F5C_mem(addr: i32, r: i32) {
  5292. instr_F30F5C(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5293. }
  5294. #[no_mangle]
  5295. pub unsafe fn instr_0F5D(source: reg128, r: i32) {
  5296. // minps xmm, xmm/mem128
  5297. let destination = read_xmm128s(r);
  5298. let result = reg128 {
  5299. f32_0: [
  5300. sse_min(destination.f32_0[0] as f64, source.f32_0[0] as f64) as f32,
  5301. sse_min(destination.f32_0[1] as f64, source.f32_0[1] as f64) as f32,
  5302. sse_min(destination.f32_0[2] as f64, source.f32_0[2] as f64) as f32,
  5303. sse_min(destination.f32_0[3] as f64, source.f32_0[3] as f64) as f32,
  5304. ],
  5305. };
  5306. write_xmm_reg128(r, result);
  5307. }
  5308. #[no_mangle]
  5309. pub unsafe fn instr_0F5D_reg(r1: i32, r2: i32) { instr_0F5D(read_xmm128s(r1), r2); }
  5310. #[no_mangle]
  5311. pub unsafe fn instr_0F5D_mem(addr: i32, r: i32) {
  5312. instr_0F5D(return_on_pagefault!(safe_read128s(addr)), r);
  5313. }
  5314. #[no_mangle]
  5315. pub unsafe fn instr_660F5D(source: reg128, r: i32) {
  5316. // minpd xmm, xmm/mem128
  5317. let destination = read_xmm128s(r);
  5318. let result = reg128 {
  5319. f64_0: [
  5320. sse_min(destination.f64_0[0], source.f64_0[0]),
  5321. sse_min(destination.f64_0[1], source.f64_0[1]),
  5322. ],
  5323. };
  5324. write_xmm_reg128(r, result);
  5325. }
  5326. #[no_mangle]
  5327. pub unsafe fn instr_660F5D_reg(r1: i32, r2: i32) { instr_660F5D(read_xmm128s(r1), r2); }
  5328. #[no_mangle]
  5329. pub unsafe fn instr_660F5D_mem(addr: i32, r: i32) {
  5330. instr_660F5D(return_on_pagefault!(safe_read128s(addr)), r);
  5331. }
  5332. #[no_mangle]
  5333. pub unsafe fn instr_F20F5D(source: u64, r: i32) {
  5334. // minsd xmm, xmm/mem64
  5335. let destination = read_xmm64s(r);
  5336. write_xmm_f64(
  5337. r,
  5338. sse_min(f64::from_bits(destination), f64::from_bits(source)),
  5339. );
  5340. }
  5341. #[no_mangle]
  5342. pub unsafe fn instr_F20F5D_reg(r1: i32, r2: i32) { instr_F20F5D(read_xmm64s(r1), r2); }
  5343. #[no_mangle]
  5344. pub unsafe fn instr_F20F5D_mem(addr: i32, r: i32) {
  5345. instr_F20F5D(return_on_pagefault!(safe_read64s(addr)), r);
  5346. }
  5347. #[no_mangle]
  5348. pub unsafe fn instr_F30F5D(source: f32, r: i32) {
  5349. // minss xmm, xmm/mem32
  5350. let destination = read_xmm_f32(r);
  5351. let result = sse_min(destination as f64, source as f64) as f32;
  5352. write_xmm_f32(r, result);
  5353. }
  5354. #[no_mangle]
  5355. pub unsafe fn instr_F30F5D_reg(r1: i32, r2: i32) { instr_F30F5D(read_xmm_f32(r1), r2); }
  5356. #[no_mangle]
  5357. pub unsafe fn instr_F30F5D_mem(addr: i32, r: i32) {
  5358. instr_F30F5D(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5359. }
  5360. #[no_mangle]
  5361. pub unsafe fn instr_0F5E(source: reg128, r: i32) {
  5362. // divps xmm, xmm/mem128
  5363. let destination = read_xmm128s(r);
  5364. let result = reg128 {
  5365. f32_0: [
  5366. destination.f32_0[0] / source.f32_0[0],
  5367. destination.f32_0[1] / source.f32_0[1],
  5368. destination.f32_0[2] / source.f32_0[2],
  5369. destination.f32_0[3] / source.f32_0[3],
  5370. ],
  5371. };
  5372. write_xmm_reg128(r, result);
  5373. }
  5374. #[no_mangle]
  5375. pub unsafe fn instr_0F5E_reg(r1: i32, r2: i32) { instr_0F5E(read_xmm128s(r1), r2); }
  5376. #[no_mangle]
  5377. pub unsafe fn instr_0F5E_mem(addr: i32, r: i32) {
  5378. instr_0F5E(return_on_pagefault!(safe_read128s(addr)), r);
  5379. }
  5380. #[no_mangle]
  5381. pub unsafe fn instr_660F5E(source: reg128, r: i32) {
  5382. // divpd xmm, xmm/mem128
  5383. let destination = read_xmm128s(r);
  5384. let result = reg128 {
  5385. f64_0: [
  5386. destination.f64_0[0] / source.f64_0[0],
  5387. destination.f64_0[1] / source.f64_0[1],
  5388. ],
  5389. };
  5390. write_xmm_reg128(r, result);
  5391. }
  5392. #[no_mangle]
  5393. pub unsafe fn instr_660F5E_reg(r1: i32, r2: i32) { instr_660F5E(read_xmm128s(r1), r2); }
  5394. #[no_mangle]
  5395. pub unsafe fn instr_660F5E_mem(addr: i32, r: i32) {
  5396. instr_660F5E(return_on_pagefault!(safe_read128s(addr)), r);
  5397. }
  5398. #[no_mangle]
  5399. pub unsafe fn instr_F20F5E(source: u64, r: i32) {
  5400. // divsd xmm, xmm/mem64
  5401. let destination = read_xmm64s(r);
  5402. write_xmm_f64(r, f64::from_bits(destination) / f64::from_bits(source));
  5403. }
  5404. #[no_mangle]
  5405. pub unsafe fn instr_F20F5E_reg(r1: i32, r2: i32) { instr_F20F5E(read_xmm64s(r1), r2); }
  5406. #[no_mangle]
  5407. pub unsafe fn instr_F20F5E_mem(addr: i32, r: i32) {
  5408. instr_F20F5E(return_on_pagefault!(safe_read64s(addr)), r);
  5409. }
  5410. #[no_mangle]
  5411. pub unsafe fn instr_F30F5E(source: f32, r: i32) {
  5412. // divss xmm, xmm/mem32
  5413. let destination = read_xmm_f32(r);
  5414. let result = destination / source;
  5415. write_xmm_f32(r, result);
  5416. }
  5417. #[no_mangle]
  5418. pub unsafe fn instr_F30F5E_reg(r1: i32, r2: i32) { instr_F30F5E(read_xmm_f32(r1), r2); }
  5419. #[no_mangle]
  5420. pub unsafe fn instr_F30F5E_mem(addr: i32, r: i32) {
  5421. instr_F30F5E(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5422. }
  5423. #[no_mangle]
  5424. pub unsafe fn instr_0F5F(source: reg128, r: i32) {
  5425. // maxps xmm, xmm/mem128
  5426. let destination = read_xmm128s(r);
  5427. let result = reg128 {
  5428. f32_0: [
  5429. sse_max(destination.f32_0[0] as f64, source.f32_0[0] as f64) as f32,
  5430. sse_max(destination.f32_0[1] as f64, source.f32_0[1] as f64) as f32,
  5431. sse_max(destination.f32_0[2] as f64, source.f32_0[2] as f64) as f32,
  5432. sse_max(destination.f32_0[3] as f64, source.f32_0[3] as f64) as f32,
  5433. ],
  5434. };
  5435. write_xmm_reg128(r, result);
  5436. }
  5437. #[no_mangle]
  5438. pub unsafe fn instr_0F5F_reg(r1: i32, r2: i32) { instr_0F5F(read_xmm128s(r1), r2); }
  5439. #[no_mangle]
  5440. pub unsafe fn instr_0F5F_mem(addr: i32, r: i32) {
  5441. instr_0F5F(return_on_pagefault!(safe_read128s(addr)), r);
  5442. }
  5443. #[no_mangle]
  5444. pub unsafe fn instr_660F5F(source: reg128, r: i32) {
  5445. // maxpd xmm, xmm/mem128
  5446. let destination = read_xmm128s(r);
  5447. let result = reg128 {
  5448. f64_0: [
  5449. sse_max(destination.f64_0[0], source.f64_0[0]),
  5450. sse_max(destination.f64_0[1], source.f64_0[1]),
  5451. ],
  5452. };
  5453. write_xmm_reg128(r, result);
  5454. }
  5455. #[no_mangle]
  5456. pub unsafe fn instr_660F5F_reg(r1: i32, r2: i32) { instr_660F5F(read_xmm128s(r1), r2); }
  5457. #[no_mangle]
  5458. pub unsafe fn instr_660F5F_mem(addr: i32, r: i32) {
  5459. instr_660F5F(return_on_pagefault!(safe_read128s(addr)), r);
  5460. }
  5461. #[no_mangle]
  5462. pub unsafe fn instr_F20F5F(source: u64, r: i32) {
  5463. // maxsd xmm, xmm/mem64
  5464. let destination = read_xmm64s(r);
  5465. write_xmm_f64(
  5466. r,
  5467. sse_max(f64::from_bits(destination), f64::from_bits(source)),
  5468. );
  5469. }
  5470. #[no_mangle]
  5471. pub unsafe fn instr_F20F5F_reg(r1: i32, r2: i32) { instr_F20F5F(read_xmm64s(r1), r2); }
  5472. #[no_mangle]
  5473. pub unsafe fn instr_F20F5F_mem(addr: i32, r: i32) {
  5474. instr_F20F5F(return_on_pagefault!(safe_read64s(addr)), r);
  5475. }
  5476. #[no_mangle]
  5477. pub unsafe fn instr_F30F5F(source: f32, r: i32) {
  5478. // maxss xmm, xmm/mem32
  5479. let destination = read_xmm_f32(r);
  5480. let result = sse_max(destination as f64, source as f64) as f32;
  5481. write_xmm_f32(r, result);
  5482. }
  5483. #[no_mangle]
  5484. pub unsafe fn instr_F30F5F_reg(r1: i32, r2: i32) { instr_F30F5F(read_xmm_f32(r1), r2); }
  5485. #[no_mangle]
  5486. pub unsafe fn instr_F30F5F_mem(addr: i32, r: i32) {
  5487. instr_F30F5F(return_on_pagefault!(fpu_load_m32(addr)) as f32, r);
  5488. }
  5489. #[no_mangle]
  5490. pub unsafe fn instr_0FC2(source: reg128, r: i32, imm8: i32) {
  5491. // cmpps xmm, xmm/m128
  5492. let destination = read_xmm128s(r);
  5493. let mut result = reg128 { i8_0: [0; 16] };
  5494. for i in 0..4 {
  5495. result.i32_0[i] =
  5496. if sse_comparison(imm8, destination.f32_0[i] as f64, source.f32_0[i] as f64) {
  5497. -1
  5498. }
  5499. else {
  5500. 0
  5501. };
  5502. }
  5503. write_xmm_reg128(r, result);
  5504. }
  5505. #[no_mangle]
  5506. pub unsafe fn instr_0FC2_reg(r1: i32, r2: i32, imm: i32) { instr_0FC2(read_xmm128s(r1), r2, imm); }
  5507. #[no_mangle]
  5508. pub unsafe fn instr_0FC2_mem(addr: i32, r: i32, imm: i32) {
  5509. instr_0FC2(return_on_pagefault!(safe_read128s(addr)), r, imm);
  5510. }
  5511. #[no_mangle]
  5512. pub unsafe fn instr_660FC2(source: reg128, r: i32, imm8: i32) {
  5513. // cmppd xmm, xmm/m128
  5514. let destination = read_xmm128s(r);
  5515. let result = reg128 {
  5516. i64_0: [
  5517. (if sse_comparison(imm8, destination.f64_0[0], source.f64_0[0]) { -1 } else { 0 })
  5518. as i64,
  5519. (if sse_comparison(imm8, destination.f64_0[1], source.f64_0[1]) { -1 } else { 0 })
  5520. as i64,
  5521. ],
  5522. };
  5523. write_xmm_reg128(r, result);
  5524. }
  5525. #[no_mangle]
  5526. pub unsafe fn instr_660FC2_reg(r1: i32, r2: i32, imm: i32) {
  5527. instr_660FC2(read_xmm128s(r1), r2, imm);
  5528. }
  5529. #[no_mangle]
  5530. pub unsafe fn instr_660FC2_mem(addr: i32, r: i32, imm: i32) {
  5531. instr_660FC2(return_on_pagefault!(safe_read128s(addr)), r, imm);
  5532. }
  5533. #[no_mangle]
  5534. pub unsafe fn instr_F20FC2(source: u64, r: i32, imm8: i32) {
  5535. // cmpsd xmm, xmm/m64
  5536. let destination = read_xmm64s(r);
  5537. write_xmm64(
  5538. r,
  5539. if sse_comparison(imm8, f64::from_bits(destination), f64::from_bits(source)) {
  5540. (-1i32) as u64
  5541. }
  5542. else {
  5543. 0
  5544. },
  5545. );
  5546. }
  5547. #[no_mangle]
  5548. pub unsafe fn instr_F20FC2_reg(r1: i32, r2: i32, imm: i32) {
  5549. instr_F20FC2(read_xmm64s(r1), r2, imm);
  5550. }
  5551. #[no_mangle]
  5552. pub unsafe fn instr_F20FC2_mem(addr: i32, r: i32, imm: i32) {
  5553. instr_F20FC2(return_on_pagefault!(safe_read64s(addr)), r, imm);
  5554. }
  5555. #[no_mangle]
  5556. pub unsafe fn instr_F30FC2(source: f32, r: i32, imm8: i32) {
  5557. // cmpss xmm, xmm/m32
  5558. let destination = read_xmm_f32(r);
  5559. let result = if sse_comparison(imm8, destination as f64, source as f64) { -1 } else { 0 };
  5560. write_xmm32(r, result);
  5561. }
  5562. #[no_mangle]
  5563. pub unsafe fn instr_F30FC2_reg(r1: i32, r2: i32, imm: i32) {
  5564. instr_F30FC2(read_xmm_f32(r1), r2, imm);
  5565. }
  5566. #[no_mangle]
  5567. pub unsafe fn instr_F30FC2_mem(addr: i32, r: i32, imm: i32) {
  5568. instr_F30FC2(return_on_pagefault!(fpu_load_m32(addr)) as f32, r, imm);
  5569. }