// instructions_0f.rs

#![allow(non_snake_case)]
extern "C" {
    fn get_rand_int() -> i32;
}
unsafe fn undefined_instruction() {
    dbg_assert!(false, "Undefined instruction");
    trigger_ud()
}
unsafe fn unimplemented_sse() {
    dbg_assert!(false, "Unimplemented SSE instruction");
    trigger_ud()
}
use cpu::arith::{
    bsf16, bsf32, bsr16, bsr32, bt_mem, bt_reg, btc_mem, btc_reg, btr_mem, btr_reg, bts_mem,
    bts_reg, cmpxchg8, cmpxchg16, cmpxchg32, popcnt, shld16, shld32, shrd16, shrd32, xadd8, xadd16,
    xadd32,
};
use cpu::arith::{
    imul_reg16, imul_reg32, saturate_sd_to_sb, saturate_sd_to_sw, saturate_sd_to_ub,
    saturate_sw_to_sb, saturate_sw_to_ub, saturate_ud_to_ub, saturate_uw,
};
use cpu::cpu::*;
use cpu::fpu::fpu_set_tag_word;
use cpu::global_pointers::*;
use cpu::misc_instr::{
    adjust_stack_reg, bswap, cmovcc16, cmovcc32, fxrstor, fxsave, get_stack_pointer, jmpcc16,
    jmpcc32, push16, push32_sreg, setcc_mem, setcc_reg, test_b, test_be, test_l, test_le, test_o,
    test_p, test_s, test_z,
};
use cpu::misc_instr::{lar, lsl, verr, verw};
use cpu::misc_instr::{lss16, lss32};
use cpu::sse_instr::*;
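// Handlers for the two-byte (0F-prefixed) opcode space. The naming convention
// used throughout this file: instr{16,32}_0FXX[_N]_{reg,mem}, where 16/32 is
// the operand size, XX is the second opcode byte, N is the ModR/M reg field
// when it acts as an opcode extension, and a leading 66/F2/F3 names the
// mandatory prefix selecting the SSE variant.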
#[no_mangle]
pub unsafe fn instr16_0F00_0_mem(addr: i32) {
    // sldt
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    return_on_pagefault!(safe_write16(addr, *sreg.offset(LDTR as isize) as i32));
}
#[no_mangle]
pub unsafe fn instr32_0F00_0_mem(addr: i32) { instr16_0F00_0_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F00_0_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    write_reg16(r, *sreg.offset(LDTR as isize) as i32);
}
#[no_mangle]
pub unsafe fn instr32_0F00_0_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    write_reg32(r, *sreg.offset(LDTR as isize) as i32);
}
#[no_mangle]
pub unsafe fn instr16_0F00_1_mem(addr: i32) {
    // str
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    return_on_pagefault!(safe_write16(addr, *sreg.offset(TR as isize) as i32));
}
#[no_mangle]
pub unsafe fn instr32_0F00_1_mem(addr: i32) { instr16_0F00_1_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F00_1_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    write_reg16(r, *sreg.offset(TR as isize) as i32);
}
#[no_mangle]
pub unsafe fn instr32_0F00_1_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
        return;
    }
    write_reg32(r, *sreg.offset(TR as isize) as i32);
}
#[no_mangle]
pub unsafe fn instr16_0F00_2_mem(addr: i32) {
    // lldt
    if !*protected_mode || vm86_mode() {
        trigger_ud();
    }
    else if 0 != *cpl {
        trigger_gp(0);
    }
    else {
        return_on_pagefault!(load_ldt(return_on_pagefault!(safe_read16(addr))));
    };
}
#[no_mangle]
pub unsafe fn instr32_0F00_2_mem(addr: i32) { instr16_0F00_2_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F00_2_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
    }
    else if 0 != *cpl {
        trigger_gp(0);
    }
    else {
        return_on_pagefault!(load_ldt(read_reg16(r)));
    };
}
#[no_mangle]
pub unsafe fn instr32_0F00_2_reg(r: i32) { instr16_0F00_2_reg(r) }
#[no_mangle]
pub unsafe fn instr16_0F00_3_mem(addr: i32) {
    // ltr
    if !*protected_mode || vm86_mode() {
        trigger_ud();
    }
    else if 0 != *cpl {
        trigger_gp(0);
    }
    else {
        load_tr(return_on_pagefault!(safe_read16(addr)));
    };
}
#[no_mangle]
pub unsafe fn instr32_0F00_3_mem(addr: i32) { instr16_0F00_3_mem(addr); }
#[no_mangle]
pub unsafe fn instr16_0F00_3_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        trigger_ud();
    }
    else if 0 != *cpl {
        trigger_gp(0);
    }
    else {
        load_tr(read_reg16(r));
    };
}
#[no_mangle]
pub unsafe fn instr32_0F00_3_reg(r: i32) { instr16_0F00_3_reg(r) }
#[no_mangle]
pub unsafe fn instr16_0F00_4_mem(addr: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("verr #ud");
        trigger_ud();
        return;
    }
    verr(return_on_pagefault!(safe_read16(addr)));
}
#[no_mangle]
pub unsafe fn instr32_0F00_4_mem(addr: i32) { instr16_0F00_4_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F00_4_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("verr #ud");
        trigger_ud();
        return;
    }
    verr(read_reg16(r));
}
#[no_mangle]
pub unsafe fn instr32_0F00_4_reg(r: i32) { instr16_0F00_4_reg(r) }
#[no_mangle]
pub unsafe fn instr16_0F00_5_mem(addr: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("verw #ud");
        trigger_ud();
        return;
    }
    verw(return_on_pagefault!(safe_read16(addr)));
}
#[no_mangle]
pub unsafe fn instr32_0F00_5_mem(addr: i32) { instr16_0F00_5_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F00_5_reg(r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("verw #ud");
        trigger_ud();
        return;
    }
    verw(read_reg16(r));
}
#[no_mangle]
pub unsafe fn instr32_0F00_5_reg(r: i32) { instr16_0F00_5_reg(r) }
#[no_mangle]
pub unsafe fn instr16_0F01_0_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0F01_0_reg(_r: i32) { trigger_ud(); }
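// sgdt/sidt store a 6-byte pseudo-descriptor: the 16-bit table limit followed
// by the 32-bit linear base. With a 16-bit operand size only the low 24 bits
// of the base are stored, hence the 0xFFFFFF mask passed by the instr16 forms.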
unsafe fn sgdt(addr: i32, mask: i32) {
    return_on_pagefault!(writable_or_pagefault(addr, 6));
    safe_write16(addr, *gdtr_size).unwrap();
    safe_write32(addr + 2, *gdtr_offset & mask).unwrap();
}
#[no_mangle]
pub unsafe fn instr16_0F01_0_mem(addr: i32) { sgdt(addr, 0xFFFFFF) }
#[no_mangle]
pub unsafe fn instr32_0F01_0_mem(addr: i32) { sgdt(addr, -1) }
#[no_mangle]
pub unsafe fn instr16_0F01_1_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0F01_1_reg(_r: i32) { trigger_ud(); }
unsafe fn sidt(addr: i32, mask: i32) {
    return_on_pagefault!(writable_or_pagefault(addr, 6));
    safe_write16(addr, *idtr_size).unwrap();
    safe_write32(addr + 2, *idtr_offset & mask).unwrap();
}
#[no_mangle]
pub unsafe fn instr16_0F01_1_mem(addr: i32) { sidt(addr, 0xFFFFFF) }
#[no_mangle]
pub unsafe fn instr32_0F01_1_mem(addr: i32) { sidt(addr, -1) }
#[no_mangle]
pub unsafe fn instr16_0F01_2_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0F01_2_reg(_r: i32) { trigger_ud(); }
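// lgdt/lidt are privileged: any CPL other than 0 raises #GP(0). As with
// sgdt/sidt, the 16-bit operand-size forms use only 24 bits of the base.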
unsafe fn lgdt(addr: i32, mask: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let size = return_on_pagefault!(safe_read16(addr));
    let offset = return_on_pagefault!(safe_read32s(addr + 2));
    *gdtr_size = size;
    *gdtr_offset = offset & mask;
}
#[no_mangle]
pub unsafe fn instr16_0F01_2_mem(addr: i32) { lgdt(addr, 0xFFFFFF); }
#[no_mangle]
pub unsafe fn instr32_0F01_2_mem(addr: i32) { lgdt(addr, -1); }
#[no_mangle]
pub unsafe fn instr16_0F01_3_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0F01_3_reg(_r: i32) { trigger_ud(); }
unsafe fn lidt(addr: i32, mask: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let size = return_on_pagefault!(safe_read16(addr));
    let offset = return_on_pagefault!(safe_read32s(addr + 2));
    *idtr_size = size;
    *idtr_offset = offset & mask;
}
#[no_mangle]
pub unsafe fn instr16_0F01_3_mem(addr: i32) { lidt(addr, 0xFFFFFF); }
#[no_mangle]
pub unsafe fn instr32_0F01_3_mem(addr: i32) { lidt(addr, -1); }
#[no_mangle]
pub unsafe fn instr16_0F01_4_reg(r: i32) {
    // smsw
    write_reg16(r, *cr);
}
#[no_mangle]
pub unsafe fn instr32_0F01_4_reg(r: i32) { write_reg32(r, *cr); }
#[no_mangle]
pub unsafe fn instr16_0F01_4_mem(addr: i32) {
    return_on_pagefault!(safe_write16(addr, *cr));
}
#[no_mangle]
pub unsafe fn instr32_0F01_4_mem(addr: i32) {
    return_on_pagefault!(safe_write16(addr, *cr));
}
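// lmsw replaces only the low four bits of CR0 (PE, MP, EM and TS). Once PE is
// set, lmsw cannot clear it again; leaving protected mode requires mov cr0.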
#[no_mangle]
pub unsafe fn lmsw(mut new_cr0: i32) {
    new_cr0 = *cr & !15 | new_cr0 & 15;
    if *protected_mode {
        // lmsw cannot be used to switch back
        new_cr0 |= CR0_PE
    }
    set_cr0(new_cr0);
}
#[no_mangle]
pub unsafe fn instr16_0F01_6_reg(r: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    lmsw(read_reg16(r));
}
#[no_mangle]
pub unsafe fn instr32_0F01_6_reg(r: i32) { instr16_0F01_6_reg(r); }
#[no_mangle]
pub unsafe fn instr16_0F01_6_mem(addr: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    lmsw(return_on_pagefault!(safe_read16(addr)));
}
#[no_mangle]
pub unsafe fn instr32_0F01_6_mem(addr: i32) { instr16_0F01_6_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F01_7_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0F01_7_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0F01_7_mem(addr: i32) {
    // invlpg
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    invlpg(addr);
}
#[no_mangle]
pub unsafe fn instr32_0F01_7_mem(addr: i32) { instr16_0F01_7_mem(addr) }
#[no_mangle]
pub unsafe fn instr16_0F02_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg16(
        r,
        lar(return_on_pagefault!(safe_read16(addr)), read_reg16(r)),
    );
}
#[no_mangle]
pub unsafe fn instr16_0F02_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg16(r, lar(read_reg16(r1), read_reg16(r)));
}
#[no_mangle]
pub unsafe fn instr32_0F02_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg32(
        r,
        lar(return_on_pagefault!(safe_read16(addr)), read_reg32(r)),
    );
}
#[no_mangle]
pub unsafe fn instr32_0F02_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lar #ud");
        trigger_ud();
        return;
    }
    write_reg32(r, lar(read_reg16(r1), read_reg32(r)));
}
#[no_mangle]
pub unsafe fn instr16_0F03_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg16(
        r,
        lsl(return_on_pagefault!(safe_read16(addr)), read_reg16(r)),
    );
}
#[no_mangle]
pub unsafe fn instr16_0F03_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg16(r, lsl(read_reg16(r1), read_reg16(r)));
}
#[no_mangle]
pub unsafe fn instr32_0F03_mem(addr: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg32(
        r,
        lsl(return_on_pagefault!(safe_read16(addr)), read_reg32(r)),
    );
}
#[no_mangle]
pub unsafe fn instr32_0F03_reg(r1: i32, r: i32) {
    if !*protected_mode || vm86_mode() {
        dbg_log!("lsl #ud");
        trigger_ud();
        return;
    }
    write_reg32(r, lsl(read_reg16(r1), read_reg32(r)));
}
#[no_mangle]
pub unsafe fn instr_0F04() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F05() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F06() {
    // clts
    if 0 != *cpl {
        dbg_log!("clts #gp");
        trigger_gp(0);
    }
    else {
        if false {
            dbg_log!("clts");
        }
        *cr &= !CR0_TS;
    };
}
#[no_mangle]
pub unsafe fn instr_0F07() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F08() {
    // invd
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F09() {
    if 0 != *cpl {
        dbg_log!("wbinvd #gp");
        trigger_gp(0);
    }
    else {
        // wbinvd
    };
}
#[no_mangle]
pub unsafe fn instr_0F0A() { undefined_instruction(); }
pub unsafe fn instr_0F0B() {
    // UD2
    trigger_ud();
}
#[no_mangle]
pub unsafe fn instr_0F0C() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F0D() {
    // nop
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F0E() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F0F() { undefined_instruction(); }
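// SSE move group. The same opcode byte is decoded four ways depending on the
// mandatory prefix: 0F10 is movups, 660F10 movupd, F30F10 movss and F20F10
// movsd; the 0F11 forms are the corresponding stores.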
pub unsafe fn instr_0F10(source: reg128, r: i32) {
    // movups xmm, xmm/m128
    mov_rm_r128(source, r);
}
pub unsafe fn instr_0F10_reg(r1: i32, r2: i32) { instr_0F10(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F10_mem(addr: i32, r: i32) {
    instr_0F10(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_F30F10_reg(r1: i32, r2: i32) {
    // movss xmm, xmm/m32
    let data = read_xmm128s(r1);
    write_xmm32(r2, data.u32[0] as i32);
}
pub unsafe fn instr_F30F10_mem(addr: i32, r: i32) {
    // movss xmm, xmm/m32
    let data = return_on_pagefault!(safe_read32s(addr));
    write_xmm128(r, data, 0, 0, 0);
}
pub unsafe fn instr_660F10(source: reg128, r: i32) {
    // movupd xmm, xmm/m128
    mov_rm_r128(source, r);
}
pub unsafe fn instr_660F10_reg(r1: i32, r2: i32) { instr_660F10(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F10_mem(addr: i32, r: i32) {
    instr_660F10(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_F20F10_reg(r1: i32, r2: i32) {
    // movsd xmm, xmm/m64
    let data = read_xmm128s(r1);
    write_xmm64(r2, data.u64[0]);
}
pub unsafe fn instr_F20F10_mem(addr: i32, r: i32) {
    // movsd xmm, xmm/m64
    let data = return_on_pagefault!(safe_read64s(addr));
    write_xmm128_2(r, data, 0);
}
pub unsafe fn instr_0F11_reg(r1: i32, r2: i32) {
    // movups xmm/m128, xmm
    mov_r_r128(r1, r2);
}
pub unsafe fn instr_0F11_mem(addr: i32, r: i32) {
    // movups xmm/m128, xmm
    mov_r_m128(addr, r);
}
pub unsafe fn instr_F30F11_reg(rm_dest: i32, reg_src: i32) {
    // movss xmm/m32, xmm
    let data = read_xmm128s(reg_src);
    write_xmm32(rm_dest, data.u32[0] as i32);
}
pub unsafe fn instr_F30F11_mem(addr: i32, r: i32) {
    // movss xmm/m32, xmm
    let data = read_xmm128s(r);
    return_on_pagefault!(safe_write32(addr, data.u32[0] as i32));
}
pub unsafe fn instr_660F11_reg(r1: i32, r2: i32) {
    // movupd xmm/m128, xmm
    mov_r_r128(r1, r2);
}
pub unsafe fn instr_660F11_mem(addr: i32, r: i32) {
    // movupd xmm/m128, xmm
    mov_r_m128(addr, r);
}
pub unsafe fn instr_F20F11_reg(r1: i32, r2: i32) {
    // movsd xmm/m64, xmm
    let data = read_xmm128s(r2);
    write_xmm64(r1, data.u64[0]);
}
pub unsafe fn instr_F20F11_mem(addr: i32, r: i32) {
    // movsd xmm/m64, xmm
    let data = read_xmm64s(r);
    return_on_pagefault!(safe_write64(addr, data));
}
pub unsafe fn instr_0F12_mem(addr: i32, r: i32) {
    // movlps xmm, m64
    let data = return_on_pagefault!(safe_read64s(addr));
    write_xmm64(r, data);
}
pub unsafe fn instr_0F12_reg(r1: i32, r2: i32) {
    // movhlps xmm, xmm
    let data = read_xmm128s(r1);
    write_xmm64(r2, data.u64[1]);
}
pub unsafe fn instr_660F12_reg(_r1: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F12_mem(addr: i32, r: i32) {
    // movlpd xmm, m64
    let data = return_on_pagefault!(safe_read64s(addr));
    write_xmm64(r, data);
}
#[no_mangle]
pub unsafe fn instr_F20F12_mem(_addr: i32, _r: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_F20F12_reg(_r1: i32, _r2: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_F30F12_mem(_addr: i32, _r: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_F30F12_reg(_r1: i32, _r2: i32) { unimplemented_sse(); }
pub unsafe fn instr_0F13_mem(addr: i32, r: i32) {
    // movlps m64, xmm
    movl_r128_m64(addr, r);
}
pub unsafe fn instr_0F13_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660F13_reg(_r1: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F13_mem(addr: i32, r: i32) {
    // movlpd xmm/m64, xmm
    movl_r128_m64(addr, r);
}
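// The unpck* instructions interleave destination and source lanes: unpcklps
// produces (dst0, src0, dst1, src1) from the low dwords, unpckhps the same
// from the high dwords, and the pd variants do likewise with qword halves.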
#[no_mangle]
pub unsafe fn instr_0F14(source: u64, r: i32) {
    // unpcklps xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm64s(r);
    write_xmm128(
        r,
        destination as i32,
        source as i32,
        (destination >> 32) as i32,
        (source >> 32) as i32,
    );
}
pub unsafe fn instr_0F14_reg(r1: i32, r2: i32) { instr_0F14(read_xmm64s(r1), r2); }
pub unsafe fn instr_0F14_mem(addr: i32, r: i32) {
    instr_0F14(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F14(source: u64, r: i32) {
    // unpcklpd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm64s(r);
    write_xmm128(
        r,
        destination as i32,
        (destination >> 32) as i32,
        source as i32,
        (source >> 32) as i32,
    );
}
pub unsafe fn instr_660F14_reg(r1: i32, r2: i32) { instr_660F14(read_xmm64s(r1), r2); }
pub unsafe fn instr_660F14_mem(addr: i32, r: i32) {
    instr_660F14(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F15(source: reg128, r: i32) {
    // unpckhps xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[2] as i32,
        source.u32[2] as i32,
        destination.u32[3] as i32,
        source.u32[3] as i32,
    );
}
pub unsafe fn instr_0F15_reg(r1: i32, r2: i32) { instr_0F15(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F15_mem(addr: i32, r: i32) {
    instr_0F15(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F15(source: reg128, r: i32) {
    // unpckhpd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[2] as i32,
        destination.u32[3] as i32,
        source.u32[2] as i32,
        source.u32[3] as i32,
    );
}
pub unsafe fn instr_660F15_reg(r1: i32, r2: i32) { instr_660F15(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F15_mem(addr: i32, r: i32) {
    instr_660F15(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F16(source: u64, r: i32) { (*reg_xmm.offset(r as isize)).u64[1] = source; }
pub unsafe fn instr_0F16_mem(addr: i32, r: i32) {
    // movhps xmm, m64
    instr_0F16(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_0F16_reg(r1: i32, r2: i32) {
    // movlhps xmm, xmm
    instr_0F16(read_xmm64s(r1), r2);
}
pub unsafe fn instr_660F16_mem(addr: i32, r: i32) {
    // movhpd xmm, m64
    instr_0F16(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_660F16_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_F30F16_reg(_r1: i32, _r2: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_F30F16_mem(_addr: i32, _r: i32) { unimplemented_sse(); }
pub unsafe fn instr_0F17_mem(addr: i32, r: i32) {
    // movhps m64, xmm
    movh_r128_m64(addr, r);
}
pub unsafe fn instr_0F17_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660F17_mem(addr: i32, r: i32) {
    // movhpd m64, xmm
    movh_r128_m64(addr, r);
}
pub unsafe fn instr_660F17_reg(_r1: i32, _r2: i32) { trigger_ud(); }
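// 0F18..0F1F cover the prefetch and hint-nop opcodes; they have no
// architectural effect to emulate and are treated as no-ops here (except
// 0F1A/0F1B, which are left undefined).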
pub unsafe fn instr_0F18_reg(_r1: i32, _r2: i32) {
    // reserved nop
}
pub unsafe fn instr_0F18_mem(_addr: i32, _r: i32) {
    // prefetch
    // nop for us
}
pub unsafe fn instr_0F19_reg(_r1: i32, _r2: i32) {}
pub unsafe fn instr_0F19_mem(_addr: i32, _r: i32) {}
#[no_mangle]
pub unsafe fn instr_0F1A() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F1B() { undefined_instruction(); }
pub unsafe fn instr_0F1C_reg(_r1: i32, _r2: i32) {}
pub unsafe fn instr_0F1C_mem(_addr: i32, _r: i32) {}
pub unsafe fn instr_0F1D_reg(_r1: i32, _r2: i32) {}
pub unsafe fn instr_0F1D_mem(_addr: i32, _r: i32) {}
pub unsafe fn instr_0F1E_reg(_r1: i32, _r2: i32) {}
pub unsafe fn instr_0F1E_mem(_addr: i32, _r: i32) {}
pub unsafe fn instr_0F1F_reg(_r1: i32, _r2: i32) {}
pub unsafe fn instr_0F1F_mem(_addr: i32, _r: i32) {}
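// 0F20..0F23 move between general-purpose registers and the control/debug
// registers. All four forms require CPL 0; only CR0, CR2, CR3 and CR4 are
// implemented, and DR4/DR5 alias DR6/DR7 unless CR4.DE makes them #UD.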
#[no_mangle]
pub unsafe fn instr_0F20(r: i32, creg: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    match creg {
        0 => {
            write_reg32(r, *cr);
        },
        2 => {
            write_reg32(r, *cr.offset(2));
        },
        3 => {
            write_reg32(r, *cr.offset(3));
        },
        4 => {
            write_reg32(r, *cr.offset(4));
        },
        _ => {
            dbg_log!("{}", creg);
            undefined_instruction();
        },
    }
}
#[no_mangle]
pub unsafe fn instr_0F21(r: i32, mut dreg_index: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    if dreg_index == 4 || dreg_index == 5 {
        if 0 != *cr.offset(4) & CR4_DE {
            dbg_log!("#ud mov dreg 4/5 with cr4.DE set");
            trigger_ud();
            return;
        }
        else {
            // DR4 and DR5 refer to DR6 and DR7 respectively
            dreg_index += 2
        }
    }
    write_reg32(r, *dreg.offset(dreg_index as isize));
    if false {
        dbg_log!(
            "read dr{}: {:x}",
            dreg_index,
            *dreg.offset(dreg_index as isize)
        );
    }
}
#[no_mangle]
pub unsafe fn instr_0F22(r: i32, creg: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let data = read_reg32(r);
    // mov cr, r32
    match creg {
        0 => {
            if false {
                dbg_log!("cr0 <- {:x}", data);
            }
            set_cr0(data);
        },
        2 => {
            dbg_log!("cr2 <- {:x}", data);
            *cr.offset(2) = data
        },
        3 => set_cr3(data),
        4 => {
            dbg_log!("cr4 <- {:x}", data);
            if 0 != data as u32
                & ((1 << 11 | 1 << 12 | 1 << 15 | 1 << 16 | 1 << 19) as u32 | 0xFFC00000)
            {
                dbg_log!("trigger_gp: Invalid cr4 bit");
                trigger_gp(0);
                return;
            }
            else {
                if 0 != (*cr.offset(4) ^ data) & (CR4_PGE | CR4_PSE | CR4_PAE) {
                    full_clear_tlb();
                }
                if data & CR4_PAE != 0
                    && 0 != (*cr.offset(4) ^ data) & (CR4_PGE | CR4_PSE | CR4_SMEP)
                {
                    load_pdpte(*cr.offset(3));
                }
                *cr.offset(4) = data;
            }
        },
        _ => {
            dbg_log!("{}", creg);
            undefined_instruction();
        },
    }
}
#[no_mangle]
pub unsafe fn instr_0F23(r: i32, mut dreg_index: i32) {
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    if dreg_index == 4 || dreg_index == 5 {
        if 0 != *cr.offset(4) & CR4_DE {
            dbg_log!("#ud mov dreg 4/5 with cr4.DE set");
            trigger_ud();
            return;
        }
        else {
            // DR4 and DR5 refer to DR6 and DR7 respectively
            dreg_index += 2
        }
    }
    *dreg.offset(dreg_index as isize) = read_reg32(r);
    if false {
        dbg_log!(
            "write dr{}: {:x}",
            dreg_index,
            *dreg.offset(dreg_index as isize)
        );
    }
}
#[no_mangle]
pub unsafe fn instr_0F24() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F25() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F26() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F27() { undefined_instruction(); }
pub unsafe fn instr_0F28(source: reg128, r: i32) {
    // movaps xmm, xmm/m128
    // XXX: Aligned read or #gp
    mov_rm_r128(source, r);
}
pub unsafe fn instr_0F28_reg(r1: i32, r2: i32) { instr_0F28(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F28_mem(addr: i32, r: i32) {
    instr_0F28(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_660F28(source: reg128, r: i32) {
    // movapd xmm, xmm/m128
    // XXX: Aligned read or #gp
    // Note: Same as movdqa (660F6F)
    mov_rm_r128(source, r);
}
pub unsafe fn instr_660F28_reg(r1: i32, r2: i32) { instr_660F28(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F28_mem(addr: i32, r: i32) {
    instr_660F28(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_0F29_mem(addr: i32, r: i32) {
    // movaps m128, xmm
    let data = read_xmm128s(r);
    // XXX: Aligned write or #gp
    return_on_pagefault!(safe_write128(addr, data));
}
pub unsafe fn instr_0F29_reg(r1: i32, r2: i32) {
    // movaps xmm, xmm
    mov_r_r128(r1, r2);
}
pub unsafe fn instr_660F29_mem(addr: i32, r: i32) {
    // movapd m128, xmm
    let data = read_xmm128s(r);
    // XXX: Aligned write or #gp
    return_on_pagefault!(safe_write128(addr, data));
}
pub unsafe fn instr_660F29_reg(r1: i32, r2: i32) {
    // movapd xmm, xmm
    mov_r_r128(r1, r2);
}
#[no_mangle]
pub unsafe fn instr_0F2A(source: u64, r: i32) {
    // cvtpi2ps xmm, mm/m64
    // Note: Casts here can fail
    // XXX: Should round according to round control
    let source: [i32; 2] = std::mem::transmute(source);
    let result = [source[0] as f32, source[1] as f32];
    write_xmm64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F2A_reg(r1: i32, r2: i32) { instr_0F2A(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F2A_mem(addr: i32, r: i32) {
    instr_0F2A(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F2A(source: u64, r: i32) {
    // cvtpi2pd xmm, mm/m64
    // These casts can't fail
    let source: [i32; 2] = std::mem::transmute(source);
    let result = reg128 {
        f64: [source[0] as f64, source[1] as f64],
    };
    write_xmm_reg128(r, result);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_660F2A_reg(r1: i32, r2: i32) { instr_660F2A(read_mmx64s(r1), r2); }
pub unsafe fn instr_660F2A_mem(addr: i32, r: i32) {
    instr_660F2A(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F2A(source: i32, r: i32) {
    // cvtsi2sd xmm, r32/m32
    // This cast can't fail
    write_xmm_f64(r, source as f64);
}
pub unsafe fn instr_F20F2A_reg(r1: i32, r2: i32) { instr_F20F2A(read_reg32(r1), r2); }
pub unsafe fn instr_F20F2A_mem(addr: i32, r: i32) {
    instr_F20F2A(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F2A(source: i32, r: i32) {
    // cvtsi2ss xmm, r/m32
    // Note: This cast can fail
    // XXX: Should round according to round control
    let result = source as f32;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F2A_reg(r1: i32, r2: i32) { instr_F30F2A(read_reg32(r1), r2); }
pub unsafe fn instr_F30F2A_mem(addr: i32, r: i32) {
    instr_F30F2A(return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr_0F2B_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_0F2B_mem(addr: i32, r: i32) {
    // movntps m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
pub unsafe fn instr_660F2B_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660F2B_mem(addr: i32, r: i32) {
    // movntpd m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
pub unsafe fn instr_0F2C(source: u64, r: i32) {
    // cvttps2pi mm, xmm/m64
    let low = f32::from_bits(source as u32);
    let high = f32::from_bits((source >> 32) as u32);
    write_mmx_reg64(
        r,
        sse_convert_with_truncation_f32_to_i32(low) as u32 as u64
            | (sse_convert_with_truncation_f32_to_i32(high) as u32 as u64) << 32,
    );
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F2C_mem(addr: i32, r: i32) {
    instr_0F2C(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F2C_reg(r1: i32, r2: i32) { instr_0F2C(read_xmm64s(r1), r2); }
pub unsafe fn instr_660F2C(source: reg128, r: i32) {
    // cvttpd2pi mm, xmm/m128
    write_mmx_reg64(
        r,
        sse_convert_with_truncation_f64_to_i32(source.f64[0]) as u32 as u64
            | (sse_convert_with_truncation_f64_to_i32(source.f64[1]) as u32 as u64) << 32,
    );
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660F2C_mem(addr: i32, r: i32) {
    instr_660F2C(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F2C_reg(r1: i32, r2: i32) { instr_660F2C(read_xmm128s(r1), r2); }
pub unsafe fn instr_F20F2C(source: u64, r: i32) {
    // cvttsd2si r32, xmm/m64
    let source = f64::from_bits(source);
    write_reg32(r, sse_convert_with_truncation_f64_to_i32(source));
}
#[no_mangle]
pub unsafe fn instr_F20F2C_reg(r1: i32, r2: i32) { instr_F20F2C(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F20F2C_mem(addr: i32, r: i32) {
    instr_F20F2C(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_F30F2C(source: f32, r: i32) {
    // cvttss2si
    write_reg32(r, sse_convert_with_truncation_f32_to_i32(source));
}
#[no_mangle]
pub unsafe fn instr_F30F2C_mem(addr: i32, r: i32) {
    instr_F30F2C(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F2C_reg(r1: i32, r2: i32) { instr_F30F2C(read_xmm_f32(r1), r2); }
pub unsafe fn instr_0F2D(source: u64, r: i32) {
    // cvtps2pi mm, xmm/m64
    let source: [f32; 2] = std::mem::transmute(source);
    let result = [
        sse_convert_f32_to_i32(source[0]),
        sse_convert_f32_to_i32(source[1]),
    ];
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F2D_reg(r1: i32, r2: i32) { instr_0F2D(read_xmm64s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_0F2D_mem(addr: i32, r: i32) {
    instr_0F2D(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_660F2D(source: reg128, r: i32) {
    // cvtpd2pi mm, xmm/m128
    let result = [
        sse_convert_f64_to_i32(source.f64[0]),
        sse_convert_f64_to_i32(source.f64[1]),
    ];
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_660F2D_reg(r1: i32, r2: i32) { instr_660F2D(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_660F2D_mem(addr: i32, r: i32) {
    instr_660F2D(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_F20F2D(source: u64, r: i32) {
    // cvtsd2si r32, xmm/m64
    write_reg32(r, sse_convert_f64_to_i32(f64::from_bits(source)));
}
pub unsafe fn instr_F20F2D_reg(r1: i32, r2: i32) { instr_F20F2D(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F2D_mem(addr: i32, r: i32) {
    instr_F20F2D(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_F30F2D(source: f32, r: i32) {
    // cvtss2si r32, xmm1/m32
    write_reg32(r, sse_convert_f32_to_i32(source));
}
pub unsafe fn instr_F30F2D_reg(r1: i32, r2: i32) { instr_F30F2D(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F2D_mem(addr: i32, r: i32) {
    instr_F30F2D(return_on_pagefault!(safe_read_f32(addr)), r);
}
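// ucomiss/ucomisd and comiss/comisd compare scalars and report the result in
// ZF/PF/CF (OF, SF and AF are cleared):
//   unordered (NaN)  ZF=1 PF=1 CF=1
//   less             ZF=0 PF=0 CF=1
//   greater          ZF=0 PF=0 CF=0
//   equal            ZF=1 PF=0 CF=0
// The com* forms additionally signal #IA on quiet NaNs; neither exception is
// modelled here, as the TODOs below note.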
#[no_mangle]
pub unsafe fn instr_0F2E(source: f32, r: i32) {
    // ucomiss xmm1, xmm2/m32
    let destination = read_xmm_f32(r);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // TODO: Signal on SNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
pub unsafe fn instr_0F2E_reg(r1: i32, r2: i32) { instr_0F2E(read_xmm_f32(r1), r2) }
pub unsafe fn instr_0F2E_mem(addr: i32, r: i32) {
    instr_0F2E(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F2E(source: u64, r: i32) {
    // ucomisd xmm1, xmm2/m64
    let destination = f64::from_bits(read_xmm64s(r));
    let source = f64::from_bits(source);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // TODO: Signal on SNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
pub unsafe fn instr_660F2E_reg(r1: i32, r: i32) { instr_660F2E(read_xmm64s(r1), r); }
pub unsafe fn instr_660F2E_mem(addr: i32, r: i32) {
    instr_660F2E(return_on_pagefault!(safe_read64s(addr)), r)
}
#[no_mangle]
pub unsafe fn instr_0F2F(source: f32, r: i32) {
    // comiss xmm1, xmm2/m32
    let destination = read_xmm_f32(r);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // TODO: Signal on SNaN or QNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
pub unsafe fn instr_0F2F_reg(r1: i32, r2: i32) { instr_0F2F(read_xmm_f32(r1), r2) }
pub unsafe fn instr_0F2F_mem(addr: i32, r: i32) {
    instr_0F2F(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F2F(source: u64, r: i32) {
    // comisd xmm1, xmm2/m64
    let destination = f64::from_bits(read_xmm64s(r));
    let source = f64::from_bits(source);
    *flags_changed = 0;
    *flags &= !FLAGS_ALL;
    if destination == source {
        *flags |= FLAG_ZERO
    }
    else if destination < source {
        *flags |= FLAG_CARRY
    }
    else if destination > source {
        // all flags cleared
    }
    else {
        // TODO: Signal on SNaN or QNaN
        *flags |= FLAG_ZERO | FLAG_PARITY | FLAG_CARRY
    }
}
pub unsafe fn instr_660F2F_reg(r1: i32, r: i32) { instr_660F2F(read_xmm64s(r1), r); }
pub unsafe fn instr_660F2F_mem(addr: i32, r: i32) {
    instr_660F2F(return_on_pagefault!(safe_read64s(addr)), r)
}
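// wrmsr/rdmsr address a model-specific register by the index in ecx and pass
// 64 bits of data through edx:eax (edx holds the high half). Both require
// CPL 0. Most of the indices below are accepted but ignored; judging by the
// inline comments, they exist so that guests which probe them (Linux, NetBSD,
// kvm-unit-test) keep running.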
#[no_mangle]
pub unsafe fn instr_0F30() {
    // wrmsr - write machine specific register
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let index = read_reg32(ECX);
    let low = read_reg32(EAX);
    let high = read_reg32(EDX);
    if index != IA32_SYSENTER_ESP {
        dbg_log!("wrmsr ecx={:x} data={:x}:{:x}", index, high, low);
    }
    match index {
        IA32_SYSENTER_CS => *sysenter_cs = low & 0xFFFF,
        IA32_SYSENTER_EIP => *sysenter_eip = low,
        IA32_SYSENTER_ESP => *sysenter_esp = low,
        IA32_FEAT_CTL => {}, // linux 5.x
        MSR_TEST_CTRL => {}, // linux 5.x
        IA32_APIC_BASE => {
            dbg_assert!(
                high == 0,
                "Changing APIC address (high 32 bits) not supported"
            );
            let address = low & !(IA32_APIC_BASE_BSP | IA32_APIC_BASE_EXTD | IA32_APIC_BASE_EN);
            dbg_assert!(
                address == APIC_ADDRESS,
                "Changing APIC address not supported"
            );
            dbg_assert!(low & IA32_APIC_BASE_EXTD == 0, "x2apic not supported");
            *apic_enabled = low & IA32_APIC_BASE_EN == IA32_APIC_BASE_EN
        },
        IA32_TIME_STAMP_COUNTER => set_tsc(low as u32, high as u32),
        IA32_BIOS_SIGN_ID => {},
        MISC_FEATURE_ENABLES => {
            // Linux 4, see: https://patchwork.kernel.org/patch/9528279/
        },
        IA32_MISC_ENABLE => {
            // Enable Misc. Processor Features
        },
        IA32_MCG_CAP => {}, // netbsd
        IA32_KERNEL_GS_BASE => {
            // Only used in 64 bit mode (by SWAPGS), but set by kvm-unit-test
            dbg_log!("GS Base written");
        },
        IA32_PAT => {},
        IA32_SPEC_CTRL => {}, // linux 5.19
        IA32_TSX_CTRL => {}, // linux 5.19
        MSR_TSX_FORCE_ABORT => {}, // linux 5.19
        IA32_MCU_OPT_CTRL => {}, // linux 5.19
        _ => {
            dbg_log!("Unknown msr: {:x}", index);
            dbg_assert!(false);
        },
    }
}
pub unsafe fn instr_0F31() {
    // rdtsc - read timestamp counter
    if 0 == *cpl || 0 == *cr.offset(4) & CR4_TSD {
        let tsc = read_tsc();
        write_reg32(EAX, tsc as i32);
        write_reg32(EDX, (tsc >> 32) as i32);
        if false {
            dbg_log!("rdtsc edx:eax={:x}:{:x}", read_reg32(EDX), read_reg32(EAX));
        }
    }
    else {
        trigger_gp(0);
    };
}
#[no_mangle]
pub unsafe fn instr_0F32() {
    // rdmsr - read model-specific register
    if 0 != *cpl {
        trigger_gp(0);
        return;
    }
    let index = read_reg32(ECX);
    dbg_log!("rdmsr ecx={:x}", index);
    let mut low: i32 = 0;
    let mut high: i32 = 0;
    match index {
        IA32_SYSENTER_CS => low = *sysenter_cs,
        IA32_SYSENTER_EIP => low = *sysenter_eip,
        IA32_SYSENTER_ESP => low = *sysenter_esp,
        IA32_TIME_STAMP_COUNTER => {
            let tsc = read_tsc();
            low = tsc as i32;
            high = (tsc >> 32) as i32
        },
        IA32_FEAT_CTL => {}, // linux 5.x
        MSR_TEST_CTRL => {}, // linux 5.x
        IA32_PLATFORM_ID => {},
        IA32_APIC_BASE => {
            if *acpi_enabled {
                low = APIC_ADDRESS;
                if *apic_enabled {
                    low |= IA32_APIC_BASE_EN
                }
            }
        },
        IA32_BIOS_SIGN_ID => {},
        MSR_PLATFORM_INFO => low = 1 << 8,
        MISC_FEATURE_ENABLES => {},
        IA32_MISC_ENABLE => {
            // Enable Misc. Processor Features
            low = 1 << 0; // fast string
        },
        IA32_RTIT_CTL => {
            // linux4
        },
        MSR_SMI_COUNT => {},
        IA32_MCG_CAP => {
            // netbsd
        },
        IA32_PAT => {},
        MSR_PKG_C2_RESIDENCY => {},
        IA32_SPEC_CTRL => {}, // linux 5.19
        IA32_TSX_CTRL => {}, // linux 5.19
        MSR_TSX_FORCE_ABORT => {}, // linux 5.19
        IA32_MCU_OPT_CTRL => {}, // linux 5.19
        MSR_AMD64_LS_CFG => {}, // linux 5.19
        _ => {
            dbg_log!("Unknown msr: {:x}", index);
            dbg_assert!(false);
        },
    }
    write_reg32(EAX, low);
    write_reg32(EDX, high);
}
#[no_mangle]
pub unsafe fn instr_0F33() {
    // rdpmc
    undefined_instruction();
}
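// sysenter/sysexit build flat segments from the IA32_SYSENTER_CS selector:
// sysenter loads CS = cs and SS = cs + 8 at CPL 0; sysexit loads CS = cs + 16
// and SS = cs + 24, both with RPL 3.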
#[no_mangle]
pub unsafe fn instr_0F34() {
    // sysenter
    let seg = *sysenter_cs & 0xFFFC;
    if !*protected_mode || seg == 0 {
        trigger_gp(0);
        return;
    }
    else {
        *flags &= !FLAG_VM & !FLAG_INTERRUPT;
        *instruction_pointer = *sysenter_eip;
        write_reg32(ESP, *sysenter_esp);
        *sreg.offset(CS as isize) = seg as u16;
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = -1i32 as u32;
        *segment_offsets.offset(CS as isize) = 0;
        update_cs_size(true);
        *cpl = 0;
        cpl_changed();
        *sreg.offset(SS as isize) = (seg + 8) as u16;
        *segment_is_null.offset(SS as isize) = false;
        *segment_limits.offset(SS as isize) = -1i32 as u32;
        *segment_offsets.offset(SS as isize) = 0;
        *stack_size_32 = true;
        update_state_flags();
        return;
    };
}
#[no_mangle]
pub unsafe fn instr_0F35() {
    // sysexit
    let seg = *sysenter_cs & 0xFFFC;
    if !*protected_mode || 0 != *cpl || seg == 0 {
        trigger_gp(0);
        return;
    }
    else {
        *instruction_pointer = read_reg32(EDX);
        write_reg32(ESP, read_reg32(ECX));
        *sreg.offset(CS as isize) = (seg + 16 | 3) as u16;
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = -1i32 as u32;
        *segment_offsets.offset(CS as isize) = 0;
        update_cs_size(true);
        *cpl = 3;
        cpl_changed();
        *sreg.offset(SS as isize) = (seg + 24 | 3) as u16;
        *segment_is_null.offset(SS as isize) = false;
        *segment_limits.offset(SS as isize) = -1i32 as u32;
        *segment_offsets.offset(SS as isize) = 0;
        *stack_size_32 = true;
        update_state_flags();
        return;
    };
}
#[no_mangle]
pub unsafe fn instr_0F36() { undefined_instruction(); }
#[no_mangle]
pub unsafe fn instr_0F37() {
    // getsec
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0F38() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F39() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3A() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3B() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3C() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3D() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3E() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F3F() { unimplemented_sse(); }
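// cmovcc (0F40..0F4F): conditionally move r/m into r. The low opcode nibble
// selects the condition (O, B, Z, BE, S, P, L, LE); odd opcodes negate the
// even opcode's test_* predicate (NO, NB, NZ, ...).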
pub unsafe fn instr16_0F40_mem(addr: i32, r: i32) {
    cmovcc16(test_o(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F40_reg(r1: i32, r: i32) { cmovcc16(test_o(), read_reg16(r1), r); }
pub unsafe fn instr32_0F40_mem(addr: i32, r: i32) {
    cmovcc32(test_o(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F40_reg(r1: i32, r: i32) { cmovcc32(test_o(), read_reg32(r1), r); }
pub unsafe fn instr16_0F41_mem(addr: i32, r: i32) {
    cmovcc16(!test_o(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F41_reg(r1: i32, r: i32) { cmovcc16(!test_o(), read_reg16(r1), r); }
pub unsafe fn instr32_0F41_mem(addr: i32, r: i32) {
    cmovcc32(!test_o(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F41_reg(r1: i32, r: i32) { cmovcc32(!test_o(), read_reg32(r1), r); }
pub unsafe fn instr16_0F42_mem(addr: i32, r: i32) {
    cmovcc16(test_b(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F42_reg(r1: i32, r: i32) { cmovcc16(test_b(), read_reg16(r1), r); }
pub unsafe fn instr32_0F42_mem(addr: i32, r: i32) {
    cmovcc32(test_b(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F42_reg(r1: i32, r: i32) { cmovcc32(test_b(), read_reg32(r1), r); }
pub unsafe fn instr16_0F43_mem(addr: i32, r: i32) {
    cmovcc16(!test_b(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F43_reg(r1: i32, r: i32) { cmovcc16(!test_b(), read_reg16(r1), r); }
pub unsafe fn instr32_0F43_mem(addr: i32, r: i32) {
    cmovcc32(!test_b(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F43_reg(r1: i32, r: i32) { cmovcc32(!test_b(), read_reg32(r1), r); }
pub unsafe fn instr16_0F44_mem(addr: i32, r: i32) {
    cmovcc16(test_z(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F44_reg(r1: i32, r: i32) { cmovcc16(test_z(), read_reg16(r1), r); }
pub unsafe fn instr32_0F44_mem(addr: i32, r: i32) {
    cmovcc32(test_z(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F44_reg(r1: i32, r: i32) { cmovcc32(test_z(), read_reg32(r1), r); }
pub unsafe fn instr16_0F45_mem(addr: i32, r: i32) {
    cmovcc16(!test_z(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F45_reg(r1: i32, r: i32) { cmovcc16(!test_z(), read_reg16(r1), r); }
pub unsafe fn instr32_0F45_mem(addr: i32, r: i32) {
    cmovcc32(!test_z(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F45_reg(r1: i32, r: i32) { cmovcc32(!test_z(), read_reg32(r1), r); }
pub unsafe fn instr16_0F46_mem(addr: i32, r: i32) {
    cmovcc16(test_be(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F46_reg(r1: i32, r: i32) { cmovcc16(test_be(), read_reg16(r1), r); }
pub unsafe fn instr32_0F46_mem(addr: i32, r: i32) {
    cmovcc32(test_be(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F46_reg(r1: i32, r: i32) { cmovcc32(test_be(), read_reg32(r1), r); }
pub unsafe fn instr16_0F47_mem(addr: i32, r: i32) {
    cmovcc16(!test_be(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F47_reg(r1: i32, r: i32) { cmovcc16(!test_be(), read_reg16(r1), r); }
pub unsafe fn instr32_0F47_mem(addr: i32, r: i32) {
    cmovcc32(!test_be(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F47_reg(r1: i32, r: i32) { cmovcc32(!test_be(), read_reg32(r1), r); }
pub unsafe fn instr16_0F48_mem(addr: i32, r: i32) {
    cmovcc16(test_s(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F48_reg(r1: i32, r: i32) { cmovcc16(test_s(), read_reg16(r1), r); }
pub unsafe fn instr32_0F48_mem(addr: i32, r: i32) {
    cmovcc32(test_s(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F48_reg(r1: i32, r: i32) { cmovcc32(test_s(), read_reg32(r1), r); }
pub unsafe fn instr16_0F49_mem(addr: i32, r: i32) {
    cmovcc16(!test_s(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F49_reg(r1: i32, r: i32) { cmovcc16(!test_s(), read_reg16(r1), r); }
pub unsafe fn instr32_0F49_mem(addr: i32, r: i32) {
    cmovcc32(!test_s(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F49_reg(r1: i32, r: i32) { cmovcc32(!test_s(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4A_mem(addr: i32, r: i32) {
    cmovcc16(test_p(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4A_reg(r1: i32, r: i32) { cmovcc16(test_p(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4A_mem(addr: i32, r: i32) {
    cmovcc32(test_p(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4A_reg(r1: i32, r: i32) { cmovcc32(test_p(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4B_mem(addr: i32, r: i32) {
    cmovcc16(!test_p(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4B_reg(r1: i32, r: i32) { cmovcc16(!test_p(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4B_mem(addr: i32, r: i32) {
    cmovcc32(!test_p(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4B_reg(r1: i32, r: i32) { cmovcc32(!test_p(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4C_mem(addr: i32, r: i32) {
    cmovcc16(test_l(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4C_reg(r1: i32, r: i32) { cmovcc16(test_l(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4C_mem(addr: i32, r: i32) {
    cmovcc32(test_l(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4C_reg(r1: i32, r: i32) { cmovcc32(test_l(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4D_mem(addr: i32, r: i32) {
    cmovcc16(!test_l(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4D_reg(r1: i32, r: i32) { cmovcc16(!test_l(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4D_mem(addr: i32, r: i32) {
    cmovcc32(!test_l(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4D_reg(r1: i32, r: i32) { cmovcc32(!test_l(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4E_mem(addr: i32, r: i32) {
    cmovcc16(test_le(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4E_reg(r1: i32, r: i32) { cmovcc16(test_le(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4E_mem(addr: i32, r: i32) {
    cmovcc32(test_le(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4E_reg(r1: i32, r: i32) { cmovcc32(test_le(), read_reg32(r1), r); }
pub unsafe fn instr16_0F4F_mem(addr: i32, r: i32) {
    cmovcc16(!test_le(), return_on_pagefault!(safe_read16(addr)), r);
}
pub unsafe fn instr16_0F4F_reg(r1: i32, r: i32) { cmovcc16(!test_le(), read_reg16(r1), r); }
pub unsafe fn instr32_0F4F_mem(addr: i32, r: i32) {
    cmovcc32(!test_le(), return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr32_0F4F_reg(r1: i32, r: i32) { cmovcc32(!test_le(), read_reg32(r1), r); }
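// movmskps/movmskpd pack the sign bit of each float lane into the low bits
// of a general-purpose register (four bits for ps, two for pd).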
#[no_mangle]
pub unsafe fn instr_0F50_reg(r1: i32, r2: i32) {
    // movmskps r, xmm
    let source = read_xmm128s(r1);
    let data = (source.u32[0] >> 31
        | source.u32[1] >> 31 << 1
        | source.u32[2] >> 31 << 2
        | source.u32[3] >> 31 << 3) as i32;
    write_reg32(r2, data);
}
#[no_mangle]
pub unsafe fn instr_0F50_mem(_addr: i32, _r1: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F50_reg(r1: i32, r2: i32) {
    // movmskpd r, xmm
    let source = read_xmm128s(r1);
    let data = (source.u32[1] >> 31 | source.u32[3] >> 31 << 1) as i32;
    write_reg32(r2, data);
}
#[no_mangle]
pub unsafe fn instr_660F50_mem(_addr: i32, _r1: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F51(source: reg128, r: i32) {
    // sqrtps xmm, xmm/mem128
    // XXX: Should round according to round control
    let result = reg128 {
        f32: [
            source.f32[0].sqrt(),
            source.f32[1].sqrt(),
            source.f32[2].sqrt(),
            source.f32[3].sqrt(),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F51_reg(r1: i32, r2: i32) { instr_0F51(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F51_mem(addr: i32, r: i32) {
    instr_0F51(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F51(source: reg128, r: i32) {
    // sqrtpd xmm, xmm/mem128
    // XXX: Should round according to round control
    let result = reg128 {
        f64: [source.f64[0].sqrt(), source.f64[1].sqrt()],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F51_reg(r1: i32, r2: i32) { instr_660F51(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F51_mem(addr: i32, r: i32) {
    instr_660F51(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F51(source: u64, r: i32) {
    // sqrtsd xmm, xmm/mem64
    // XXX: Should round according to round control
    write_xmm_f64(r, f64::from_bits(source).sqrt());
}
pub unsafe fn instr_F20F51_reg(r1: i32, r2: i32) { instr_F20F51(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F51_mem(addr: i32, r: i32) {
    instr_F20F51(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F51(source: f32, r: i32) {
    // sqrtss xmm, xmm/mem32
    // XXX: Should round according to round control
    write_xmm_f32(r, source.sqrt());
}
pub unsafe fn instr_F30F51_reg(r1: i32, r2: i32) { instr_F30F51(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F51_mem(addr: i32, r: i32) {
    instr_F30F51(return_on_pagefault!(safe_read_f32(addr)), r);
}
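// Note: hardware rsqrt*/rcp* only guarantee an approximation with relative
// error <= 1.5 * 2^-12; computing the exact reciprocal (square root) below
// stays within that tolerance.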
#[no_mangle]
pub unsafe fn instr_0F52(source: reg128, r: i32) {
    // rsqrtps xmm1, xmm2/m128
    let result = reg128 {
        f32: [
            1.0 / source.f32[0].sqrt(),
            1.0 / source.f32[1].sqrt(),
            1.0 / source.f32[2].sqrt(),
            1.0 / source.f32[3].sqrt(),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F52_reg(r1: i32, r2: i32) { instr_0F52(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F52_mem(addr: i32, r: i32) {
    instr_0F52(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F52(source: f32, r: i32) {
    // rsqrtss xmm1, xmm2/m32
    write_xmm_f32(r, 1.0 / source.sqrt());
}
pub unsafe fn instr_F30F52_reg(r1: i32, r2: i32) { instr_F30F52(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F52_mem(addr: i32, r: i32) {
    instr_F30F52(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F53(source: reg128, r: i32) {
    // rcpps xmm, xmm/m128
    let result = reg128 {
        f32: [
            1.0 / source.f32[0],
            1.0 / source.f32[1],
            1.0 / source.f32[2],
            1.0 / source.f32[3],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F53_reg(r1: i32, r2: i32) { instr_0F53(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F53_mem(addr: i32, r: i32) {
    instr_0F53(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F53(source: f32, r: i32) {
    // rcpss xmm, xmm/m32
    write_xmm_f32(r, 1.0 / source);
}
pub unsafe fn instr_F30F53_reg(r1: i32, r2: i32) { instr_F30F53(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F53_mem(addr: i32, r: i32) {
    instr_F30F53(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F54(source: reg128, r: i32) {
    // andps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
pub unsafe fn instr_0F54_reg(r1: i32, r2: i32) { instr_0F54(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F54_mem(addr: i32, r: i32) {
    instr_0F54(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F54(source: reg128, r: i32) {
    // andpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
pub unsafe fn instr_660F54_reg(r1: i32, r2: i32) { instr_660F54(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F54_mem(addr: i32, r: i32) {
    instr_660F54(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F55(source: reg128, r: i32) {
    // andnps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pandn_r128(source, r);
}
pub unsafe fn instr_0F55_reg(r1: i32, r2: i32) { instr_0F55(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F55_mem(addr: i32, r: i32) {
    instr_0F55(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F55(source: reg128, r: i32) {
    // andnpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pandn_r128(source, r);
}
pub unsafe fn instr_660F55_reg(r1: i32, r2: i32) { instr_660F55(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F55_mem(addr: i32, r: i32) {
    instr_660F55(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F56(source: reg128, r: i32) {
    // orps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    por_r128(source, r);
}
pub unsafe fn instr_0F56_reg(r1: i32, r2: i32) { instr_0F56(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F56_mem(addr: i32, r: i32) {
    instr_0F56(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F56(source: reg128, r: i32) {
    // orpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    por_r128(source, r);
}
pub unsafe fn instr_660F56_reg(r1: i32, r2: i32) { instr_660F56(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F56_mem(addr: i32, r: i32) {
    instr_660F56(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F57(source: reg128, r: i32) {
    // xorps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pxor_r128(source, r);
}
pub unsafe fn instr_0F57_reg(r1: i32, r2: i32) { instr_0F57(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F57_mem(addr: i32, r: i32) {
    instr_0F57(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F57(source: reg128, r: i32) {
    // xorpd xmm, xmm/mem128
    // XXX: Aligned access or #gp
    pxor_r128(source, r);
}
pub unsafe fn instr_660F57_reg(r1: i32, r2: i32) { instr_660F57(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F57_mem(addr: i32, r: i32) {
    instr_660F57(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F58(source: reg128, r: i32) {
    // addps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            source.f32[0] + destination.f32[0],
            source.f32[1] + destination.f32[1],
            source.f32[2] + destination.f32[2],
            source.f32[3] + destination.f32[3],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F58_reg(r1: i32, r2: i32) { instr_0F58(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F58_mem(addr: i32, r: i32) {
    instr_0F58(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F58(source: reg128, r: i32) {
    // addpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            source.f64[0] + destination.f64[0],
            source.f64[1] + destination.f64[1],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F58_reg(r1: i32, r2: i32) { instr_660F58(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F58_mem(addr: i32, r: i32) {
    instr_660F58(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F58(source: u64, r: i32) {
    // addsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(r, f64::from_bits(source) + f64::from_bits(destination));
}
pub unsafe fn instr_F20F58_reg(r1: i32, r2: i32) { instr_F20F58(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F58_mem(addr: i32, r: i32) {
    instr_F20F58(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F58(source: f32, r: i32) {
    // addss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = source + destination;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F58_reg(r1: i32, r2: i32) { instr_F30F58(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F58_mem(addr: i32, r: i32) {
    instr_F30F58(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F59(source: reg128, r: i32) {
    // mulps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            source.f32[0] * destination.f32[0],
            source.f32[1] * destination.f32[1],
            source.f32[2] * destination.f32[2],
            source.f32[3] * destination.f32[3],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F59_reg(r1: i32, r2: i32) { instr_0F59(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F59_mem(addr: i32, r: i32) {
    instr_0F59(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F59(source: reg128, r: i32) {
    // mulpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            source.f64[0] * destination.f64[0],
            source.f64[1] * destination.f64[1],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F59_reg(r1: i32, r2: i32) { instr_660F59(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F59_mem(addr: i32, r: i32) {
    instr_660F59(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F59(source: u64, r: i32) {
    // mulsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(r, f64::from_bits(source) * f64::from_bits(destination));
}
pub unsafe fn instr_F20F59_reg(r1: i32, r2: i32) { instr_F20F59(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F59_mem(addr: i32, r: i32) {
    instr_F20F59(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F59(source: f32, r: i32) {
    // mulss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = source * destination;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F59_reg(r1: i32, r2: i32) { instr_F30F59(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F59_mem(addr: i32, r: i32) {
    instr_F30F59(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F5A(source: u64, r: i32) {
    // cvtps2pd xmm1, xmm2/m64
    let source: [f32; 2] = std::mem::transmute(source);
    let result = reg128 {
        f64: [source[0] as f64, source[1] as f64],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5A_reg(r1: i32, r2: i32) { instr_0F5A(read_xmm64s(r1), r2); }
pub unsafe fn instr_0F5A_mem(addr: i32, r: i32) {
    instr_0F5A(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F5A(source: reg128, r: i32) {
    // cvtpd2ps xmm1, xmm2/m128
    let result = reg128 {
        // XXX: These conversions are lossy and should round according to the round control
        f32: [source.f64[0] as f32, source.f64[1] as f32, 0., 0.],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5A_reg(r1: i32, r2: i32) { instr_660F5A(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5A_mem(addr: i32, r: i32) {
    instr_660F5A(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F5A(source: u64, r: i32) {
    // cvtsd2ss xmm1, xmm2/m64
    // XXX: This conversion is lossy and should round according to the round control
    write_xmm_f32(r, f64::from_bits(source) as f32);
}
pub unsafe fn instr_F20F5A_reg(r1: i32, r2: i32) { instr_F20F5A(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F5A_mem(addr: i32, r: i32) {
    instr_F20F5A(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5A(source: f32, r: i32) {
    // cvtss2sd xmm1, xmm2/m32
    write_xmm_f64(r, source as f64);
}
pub unsafe fn instr_F30F5A_reg(r1: i32, r2: i32) { instr_F30F5A(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F5A_mem(addr: i32, r: i32) {
    instr_F30F5A(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F5B(source: reg128, r: i32) {
    // cvtdq2ps xmm1, xmm2/m128
    // XXX: Should round according to round control
    let result = reg128 {
        f32: [
            // XXX: Precision exception
            source.i32[0] as f32,
            source.i32[1] as f32,
            source.i32[2] as f32,
            source.i32[3] as f32,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5B_reg(r1: i32, r2: i32) { instr_0F5B(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F5B_mem(addr: i32, r: i32) {
    instr_0F5B(return_on_pagefault!(safe_read128s(addr)), r);
}
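// cvtps2dq (below) rounds according to MXCSR.RC via sse_convert_f32_to_i32,
// while cvttps2dq always truncates toward zero.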
#[no_mangle]
pub unsafe fn instr_660F5B(source: reg128, r: i32) {
    // cvtps2dq xmm1, xmm2/m128
    let result = reg128 {
        i32: [
            // XXX: Precision exception
            sse_convert_f32_to_i32(source.f32[0]),
            sse_convert_f32_to_i32(source.f32[1]),
            sse_convert_f32_to_i32(source.f32[2]),
            sse_convert_f32_to_i32(source.f32[3]),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5B_reg(r1: i32, r2: i32) { instr_660F5B(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5B_mem(addr: i32, r: i32) {
    instr_660F5B(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5B(source: reg128, r: i32) {
    // cvttps2dq xmm1, xmm2/m128
    let result = reg128 {
        i32: [
            sse_convert_with_truncation_f32_to_i32(source.f32[0]),
            sse_convert_with_truncation_f32_to_i32(source.f32[1]),
            sse_convert_with_truncation_f32_to_i32(source.f32[2]),
            sse_convert_with_truncation_f32_to_i32(source.f32[3]),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_F30F5B_reg(r1: i32, r2: i32) { instr_F30F5B(read_xmm128s(r1), r2); }
pub unsafe fn instr_F30F5B_mem(addr: i32, r: i32) {
    instr_F30F5B(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F5C(source: reg128, r: i32) {
    // subps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            destination.f32[0] - source.f32[0],
            destination.f32[1] - source.f32[1],
            destination.f32[2] - source.f32[2],
            destination.f32[3] - source.f32[3],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5C_reg(r1: i32, r2: i32) { instr_0F5C(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F5C_mem(addr: i32, r: i32) {
    instr_0F5C(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F5C(source: reg128, r: i32) {
    // subpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            destination.f64[0] - source.f64[0],
            destination.f64[1] - source.f64[1],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5C_reg(r1: i32, r2: i32) { instr_660F5C(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5C_mem(addr: i32, r: i32) {
    instr_660F5C(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F5C(source: u64, r: i32) {
    // subsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(r, f64::from_bits(destination) - f64::from_bits(source));
}
pub unsafe fn instr_F20F5C_reg(r1: i32, r2: i32) { instr_F20F5C(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F5C_mem(addr: i32, r: i32) {
    instr_F20F5C(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5C(source: f32, r: i32) {
    // subss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = destination - source;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F5C_reg(r1: i32, r2: i32) { instr_F30F5C(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F5C_mem(addr: i32, r: i32) {
    instr_F30F5C(return_on_pagefault!(safe_read_f32(addr)), r);
}
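// SSE min/max quirk (presumably implemented by sse_min/sse_max): if both
// operands are zero or either is NaN, the second (source) operand is
// returned, so min/max are not commutative here.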
#[no_mangle]
pub unsafe fn instr_0F5D(source: reg128, r: i32) {
    // minps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            sse_min(destination.f32[0] as f64, source.f32[0] as f64) as f32,
            sse_min(destination.f32[1] as f64, source.f32[1] as f64) as f32,
            sse_min(destination.f32[2] as f64, source.f32[2] as f64) as f32,
            sse_min(destination.f32[3] as f64, source.f32[3] as f64) as f32,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5D_reg(r1: i32, r2: i32) { instr_0F5D(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F5D_mem(addr: i32, r: i32) {
    instr_0F5D(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F5D(source: reg128, r: i32) {
    // minpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            sse_min(destination.f64[0], source.f64[0]),
            sse_min(destination.f64[1], source.f64[1]),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5D_reg(r1: i32, r2: i32) { instr_660F5D(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5D_mem(addr: i32, r: i32) {
    instr_660F5D(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F5D(source: u64, r: i32) {
    // minsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(
        r,
        sse_min(f64::from_bits(destination), f64::from_bits(source)),
    );
}
pub unsafe fn instr_F20F5D_reg(r1: i32, r2: i32) { instr_F20F5D(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F5D_mem(addr: i32, r: i32) {
    instr_F20F5D(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5D(source: f32, r: i32) {
    // minss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = sse_min(destination as f64, source as f64) as f32;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F5D_reg(r1: i32, r2: i32) { instr_F30F5D(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F5D_mem(addr: i32, r: i32) {
    instr_F30F5D(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F5E(source: reg128, r: i32) {
    // divps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            destination.f32[0] / source.f32[0],
            destination.f32[1] / source.f32[1],
            destination.f32[2] / source.f32[2],
            destination.f32[3] / source.f32[3],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5E_reg(r1: i32, r2: i32) { instr_0F5E(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F5E_mem(addr: i32, r: i32) {
    instr_0F5E(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F5E(source: reg128, r: i32) {
    // divpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            destination.f64[0] / source.f64[0],
            destination.f64[1] / source.f64[1],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5E_reg(r1: i32, r2: i32) { instr_660F5E(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5E_mem(addr: i32, r: i32) {
    instr_660F5E(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F5E(source: u64, r: i32) {
    // divsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(r, f64::from_bits(destination) / f64::from_bits(source));
}
pub unsafe fn instr_F20F5E_reg(r1: i32, r2: i32) { instr_F20F5E(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F5E_mem(addr: i32, r: i32) {
    instr_F20F5E(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5E(source: f32, r: i32) {
    // divss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = destination / source;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F5E_reg(r1: i32, r2: i32) { instr_F30F5E(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F5E_mem(addr: i32, r: i32) {
    instr_F30F5E(return_on_pagefault!(safe_read_f32(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F5F(source: reg128, r: i32) {
    // maxps xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f32: [
            sse_max(destination.f32[0] as f64, source.f32[0] as f64) as f32,
            sse_max(destination.f32[1] as f64, source.f32[1] as f64) as f32,
            sse_max(destination.f32[2] as f64, source.f32[2] as f64) as f32,
            sse_max(destination.f32[3] as f64, source.f32[3] as f64) as f32,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0F5F_reg(r1: i32, r2: i32) { instr_0F5F(read_xmm128s(r1), r2); }
pub unsafe fn instr_0F5F_mem(addr: i32, r: i32) {
    instr_0F5F(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F5F(source: reg128, r: i32) {
    // maxpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        f64: [
            sse_max(destination.f64[0], source.f64[0]),
            sse_max(destination.f64[1], source.f64[1]),
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F5F_reg(r1: i32, r2: i32) { instr_660F5F(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F5F_mem(addr: i32, r: i32) {
    instr_660F5F(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F20F5F(source: u64, r: i32) {
    // maxsd xmm, xmm/mem64
    let destination = read_xmm64s(r);
    write_xmm_f64(
        r,
        sse_max(f64::from_bits(destination), f64::from_bits(source)),
    );
}
pub unsafe fn instr_F20F5F_reg(r1: i32, r2: i32) { instr_F20F5F(read_xmm64s(r1), r2); }
pub unsafe fn instr_F20F5F_mem(addr: i32, r: i32) {
    instr_F20F5F(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_F30F5F(source: f32, r: i32) {
    // maxss xmm, xmm/mem32
    let destination = read_xmm_f32(r);
    let result = sse_max(destination as f64, source as f64) as f32;
    write_xmm_f32(r, result);
}
pub unsafe fn instr_F30F5F_reg(r1: i32, r2: i32) { instr_F30F5F(read_xmm_f32(r1), r2); }
pub unsafe fn instr_F30F5F_mem(addr: i32, r: i32) {
    instr_F30F5F(return_on_pagefault!(safe_read_f32(addr)), r);
}
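// punpckl* interleave the low halves of destination and source element-wise
// (d0 s0 d1 s1 ...); the punpckh* variants further below do the same with
// the high halves.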
#[no_mangle]
pub unsafe fn instr_0F60(source: i32, r: i32) {
    // punpcklbw mm, mm/m32
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 4] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..4 {
        result[2 * i + 0] = destination[i];
        result[2 * i + 1] = source[i];
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F60_reg(r1: i32, r2: i32) { instr_0F60(read_mmx32s(r1), r2); }
pub unsafe fn instr_0F60_mem(addr: i32, r: i32) {
    instr_0F60(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F60(source: reg128, r: i32) {
    // punpcklbw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination: [u8; 8] = std::mem::transmute(read_xmm64s(r));
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u8[2 * i + 0] = destination[i];
        result.u8[2 * i + 1] = source.u8[i];
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F60_reg(r1: i32, r2: i32) { instr_660F60(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F60_mem(addr: i32, r: i32) {
    instr_660F60(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F61(source: i32, r: i32) {
    // punpcklwd mm, mm/m32
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 2] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..2 {
        result[2 * i + 0] = destination[i];
        result[2 * i + 1] = source[i];
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F61_reg(r1: i32, r2: i32) { instr_0F61(read_mmx32s(r1), r2); }
pub unsafe fn instr_0F61_mem(addr: i32, r: i32) {
    instr_0F61(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F61(source: reg128, r: i32) {
    // punpcklwd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination: [u16; 4] = std::mem::transmute(read_xmm64s(r));
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.u16[2 * i + 0] = destination[i];
        result.u16[2 * i + 1] = source.u16[i];
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F61_reg(r1: i32, r2: i32) { instr_660F61(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F61_mem(addr: i32, r: i32) {
    instr_660F61(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F62(source: i32, r: i32) {
    // punpckldq mm, mm/m32
    let destination = read_mmx64s(r);
    write_mmx_reg64(
        r,
        (destination & 0xFFFF_FFFF) | (source as u32 as u64) << 32,
    );
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F62_reg(r1: i32, r2: i32) { instr_0F62(read_mmx32s(r1), r2); }
pub unsafe fn instr_0F62_mem(addr: i32, r: i32) {
    instr_0F62(return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr_660F62(source: reg128, r: i32) {
    // punpckldq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[0] as i32,
        source.u32[0] as i32,
        destination.u32[1] as i32,
        source.u32[1] as i32,
    );
}
pub unsafe fn instr_660F62_reg(r1: i32, r2: i32) { instr_660F62(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F62_mem(addr: i32, r: i32) {
    instr_660F62(return_on_pagefault!(safe_read128s(addr)), r);
}
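// pack* narrow each element with signed saturation: packsswb clamps words to
// [-128, 127], packuswb to [0, 255], and packssdw clamps dwords to
// [-32768, 32767].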
#[no_mangle]
pub unsafe fn instr_0F63(source: u64, r: i32) {
    // packsswb mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..4 {
        result[i + 0] = saturate_sw_to_sb(destination[i] as i32);
        result[i + 4] = saturate_sw_to_sb(source[i] as i32);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F63_reg(r1: i32, r2: i32) { instr_0F63(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F63_mem(addr: i32, r: i32) {
    instr_0F63(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F63(source: reg128, r: i32) {
    // packsswb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u8[i + 0] = saturate_sw_to_sb(destination.u16[i] as i32);
        result.u8[i + 8] = saturate_sw_to_sb(source.u16[i] as i32);
    }
    write_xmm_reg128(r, result)
}
pub unsafe fn instr_660F63_reg(r1: i32, r2: i32) { instr_660F63(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F63_mem(addr: i32, r: i32) {
    instr_660F63(return_on_pagefault!(safe_read128s(addr)), r);
}
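// pcmpgt* compare elements as signed integers and write an all-ones mask
// (0xFF/0xFFFF/0xFFFFFFFF) where destination > source, else zero.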
#[no_mangle]
pub unsafe fn instr_0F64(source: u64, r: i32) {
    // pcmpgtb mm, mm/m64
    let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [i8; 8] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..8 {
        result[i] = if destination[i] > source[i] { 255 } else { 0 };
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F64_reg(r1: i32, r2: i32) { instr_0F64(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F64_mem(addr: i32, r: i32) {
    instr_0F64(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F64(source: reg128, r: i32) {
    // pcmpgtb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = if destination.i8[i] as i32 > source.i8[i] as i32 { 255 } else { 0 };
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F64_reg(r1: i32, r2: i32) { instr_660F64(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F64_mem(addr: i32, r: i32) {
    instr_660F64(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F65(source: u64, r: i32) {
    // pcmpgtw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result: [u16; 4] = [0; 4];
    for i in 0..4 {
        result[i] = if destination[i] > source[i] { 0xFFFF } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F65_reg(r1: i32, r2: i32) { instr_0F65(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F65_mem(addr: i32, r: i32) {
    instr_0F65(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F65(source: reg128, r: i32) {
    // pcmpgtw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = if destination.i16[i] > source.i16[i] { 0xFFFF } else { 0 };
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F65_reg(r1: i32, r2: i32) { instr_660F65(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F65_mem(addr: i32, r: i32) {
    instr_660F65(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F66(source: u64, r: i32) {
    // pcmpgtd mm, mm/m64
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = if destination[i] > source[i] { -1 } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F66_reg(r1: i32, r2: i32) { instr_0F66(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F66_mem(addr: i32, r: i32) {
    instr_0F66(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F66(source: reg128, r: i32) {
    // pcmpgtd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        if destination.i32[0] > source.i32[0] { -1 } else { 0 },
        if destination.i32[1] > source.i32[1] { -1 } else { 0 },
        if destination.i32[2] > source.i32[2] { -1 } else { 0 },
        if destination.i32[3] > source.i32[3] { -1 } else { 0 },
    );
}
pub unsafe fn instr_660F66_reg(r1: i32, r2: i32) { instr_660F66(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F66_mem(addr: i32, r: i32) {
    instr_660F66(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F67(source: u64, r: i32) {
    // packuswb mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..4 {
        result[i + 0] = saturate_sw_to_ub(destination[i]);
        result[i + 4] = saturate_sw_to_ub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F67_reg(r1: i32, r2: i32) { instr_0F67(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F67_mem(addr: i32, r: i32) {
    instr_0F67(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F67(source: reg128, r: i32) {
    // packuswb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u8[i + 0] = saturate_sw_to_ub(destination.u16[i]);
        result.u8[i + 8] = saturate_sw_to_ub(source.u16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F67_reg(r1: i32, r2: i32) { instr_660F67(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F67_mem(addr: i32, r: i32) {
    instr_660F67(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F68(source: u64, r: i32) {
    // punpckhbw mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..4 {
        result[2 * i + 0] = destination[i + 4];
        result[2 * i + 1] = source[i + 4];
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F68_reg(r1: i32, r2: i32) { instr_0F68(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F68_mem(addr: i32, r: i32) {
    instr_0F68(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F68(source: reg128, r: i32) {
    // punpckhbw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u8[2 * i + 0] = destination.u8[i + 8];
        result.u8[2 * i + 1] = source.u8[i + 8];
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F68_reg(r1: i32, r2: i32) { instr_660F68(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F68_mem(addr: i32, r: i32) {
    instr_660F68(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F69(source: u64, r: i32) {
    // punpckhwd mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let result = [destination[2], source[2], destination[3], source[3]];
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F69_reg(r1: i32, r2: i32) { instr_0F69(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F69_mem(addr: i32, r: i32) {
    instr_0F69(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F69(source: reg128, r: i32) {
    // punpckhwd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.u16[2 * i + 0] = destination.u16[i + 4];
        result.u16[2 * i + 1] = source.u16[i + 4];
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F69_reg(r1: i32, r2: i32) { instr_660F69(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F69_mem(addr: i32, r: i32) {
    instr_660F69(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6A(source: u64, r: i32) {
    // punpckhdq mm, mm/m64
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, (destination >> 32) | (source >> 32 << 32));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F6A_reg(r1: i32, r2: i32) { instr_0F6A(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F6A_mem(addr: i32, r: i32) {
    instr_0F6A(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F6A(source: reg128, r: i32) {
    // punpckhdq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[2] as i32,
        source.u32[2] as i32,
        destination.u32[3] as i32,
        source.u32[3] as i32,
    );
}
pub unsafe fn instr_660F6A_reg(r1: i32, r2: i32) { instr_660F6A(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F6A_mem(addr: i32, r: i32) {
    instr_660F6A(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6B(source: u64, r: i32) {
    // packssdw mm, mm/m64
    let destination: [u32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [u32; 2] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..2 {
        result[i + 0] = saturate_sd_to_sw(destination[i]);
        result[i + 2] = saturate_sd_to_sw(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F6B_reg(r1: i32, r2: i32) { instr_0F6B(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F6B_mem(addr: i32, r: i32) {
    instr_0F6B(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F6B(source: reg128, r: i32) {
    // packssdw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.u16[i + 0] = saturate_sd_to_sw(destination.u32[i]);
        result.u16[i + 4] = saturate_sd_to_sw(source.u32[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F6B_reg(r1: i32, r2: i32) { instr_660F6B(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F6B_mem(addr: i32, r: i32) {
    instr_660F6B(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6C_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F6C_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F6C(source: reg128, r: i32) {
    // punpcklqdq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[0] as i32,
        destination.u32[1] as i32,
        source.u32[0] as i32,
        source.u32[1] as i32,
    );
}
pub unsafe fn instr_660F6C_reg(r1: i32, r2: i32) { instr_660F6C(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F6C_mem(addr: i32, r: i32) {
    instr_660F6C(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6D_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F6D_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F6D(source: reg128, r: i32) {
    // punpckhqdq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[2] as i32,
        destination.u32[3] as i32,
        source.u32[2] as i32,
        source.u32[3] as i32,
    );
}
pub unsafe fn instr_660F6D_reg(r1: i32, r2: i32) { instr_660F6D(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F6D_mem(addr: i32, r: i32) {
    instr_660F6D(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6E(source: i32, r: i32) {
    // movd mm, r/m32
    write_mmx_reg64(r, source as u32 as u64);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F6E_reg(r1: i32, r2: i32) { instr_0F6E(read_reg32(r1), r2); }
pub unsafe fn instr_0F6E_mem(addr: i32, r: i32) {
    instr_0F6E(return_on_pagefault!(safe_read32s(addr)), r);
}
pub unsafe fn instr_660F6E(source: i32, r: i32) {
    // movd xmm, r/m32
    write_xmm128(r, source, 0, 0, 0);
}
pub unsafe fn instr_660F6E_reg(r1: i32, r2: i32) { instr_660F6E(read_reg32(r1), r2); }
pub unsafe fn instr_660F6E_mem(addr: i32, r: i32) {
    instr_660F6E(return_on_pagefault!(safe_read32s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F6F(source: u64, r: i32) {
    // movq mm, mm/m64
    write_mmx_reg64(r, source);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_0F6F_reg(r1: i32, r2: i32) { instr_0F6F(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F6F_mem(addr: i32, r: i32) {
    instr_0F6F(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_660F6F(source: reg128, r: i32) {
    // movdqa xmm, xmm/mem128
    // XXX: Aligned access or #gp
    mov_rm_r128(source, r);
}
pub unsafe fn instr_660F6F_reg(r1: i32, r2: i32) { instr_660F6F(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F6F_mem(addr: i32, r: i32) {
    instr_660F6F(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_F30F6F(source: reg128, r: i32) {
    // movdqu xmm, xmm/m128
    mov_rm_r128(source, r);
}
pub unsafe fn instr_F30F6F_reg(r1: i32, r2: i32) { instr_F30F6F(read_xmm128s(r1), r2); }
pub unsafe fn instr_F30F6F_mem(addr: i32, r: i32) {
    instr_F30F6F(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F70(source: u64, r: i32, imm8: i32) {
    // pshufw mm1, mm2/m64, imm8
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = source[(imm8 >> (2 * i) & 3) as usize];
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F70_reg(r1: i32, r2: i32, imm: i32) { instr_0F70(read_mmx64s(r1), r2, imm); }
pub unsafe fn instr_0F70_mem(addr: i32, r: i32, imm: i32) {
    instr_0F70(return_on_pagefault!(safe_read64s(addr)), r, imm);
}
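// Shuffle decoding sketch (illustrative): each 2-bit field of imm8 selects the
// source word for the corresponding result slot. For example, imm8 = 0x1B
// (0b00_01_10_11) reverses the four words:
//   result = [source[3], source[2], source[1], source[0]]
// The same 2-bit-field scheme is reused by pshufd/pshuflw/pshufhw below.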
pub unsafe fn instr_660F70(source: reg128, r: i32, imm8: i32) {
    // pshufd xmm, xmm/mem128, imm8
    // XXX: Aligned access or #gp
    write_xmm128(
        r,
        source.u32[(imm8 & 3) as usize] as i32,
        source.u32[(imm8 >> 2 & 3) as usize] as i32,
        source.u32[(imm8 >> 4 & 3) as usize] as i32,
        source.u32[(imm8 >> 6 & 3) as usize] as i32,
    );
}
pub unsafe fn instr_660F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_660F70(read_xmm128s(r1), r2, imm);
}
pub unsafe fn instr_660F70_mem(addr: i32, r: i32, imm: i32) {
    instr_660F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F20F70(source: reg128, r: i32, imm8: i32) {
    // pshuflw xmm, xmm/m128, imm8
    // XXX: Aligned access or #gp
    write_xmm128(
        r,
        source.u16[(imm8 & 3) as usize] as i32
            | (source.u16[(imm8 >> 2 & 3) as usize] as i32) << 16,
        source.u16[(imm8 >> 4 & 3) as usize] as i32
            | (source.u16[(imm8 >> 6 & 3) as usize] as i32) << 16,
        source.u32[2] as i32,
        source.u32[3] as i32,
    );
}
pub unsafe fn instr_F20F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_F20F70(read_xmm128s(r1), r2, imm);
}
pub unsafe fn instr_F20F70_mem(addr: i32, r: i32, imm: i32) {
    instr_F20F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F30F70(source: reg128, r: i32, imm8: i32) {
    // pshufhw xmm, xmm/m128, imm8
    // XXX: Aligned access or #gp
    write_xmm128(
        r,
        source.u32[0] as i32,
        source.u32[1] as i32,
        source.u16[(imm8 & 3 | 4) as usize] as i32
            | (source.u16[(imm8 >> 2 & 3 | 4) as usize] as i32) << 16,
        source.u16[(imm8 >> 4 & 3 | 4) as usize] as i32
            | (source.u16[(imm8 >> 6 & 3 | 4) as usize] as i32) << 16,
    );
}
pub unsafe fn instr_F30F70_reg(r1: i32, r2: i32, imm: i32) {
    instr_F30F70(read_xmm128s(r1), r2, imm);
}
pub unsafe fn instr_F30F70_mem(addr: i32, r: i32, imm: i32) {
    instr_F30F70(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
pub unsafe fn instr_0F71_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_0F71_4_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_0F71_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F71_2_reg(r: i32, imm8: i32) {
    // psrlw mm, imm8
    psrlw_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F71_4_reg(r: i32, imm8: i32) {
    // psraw mm, imm8
    psraw_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F71_6_reg(r: i32, imm8: i32) {
    // psllw mm, imm8
    psllw_r64(r, imm8 as u64);
}
pub unsafe fn instr_660F71_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F71_4_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F71_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F71_2_reg(r: i32, imm8: i32) {
    // psrlw xmm, imm8
    psrlw_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F71_4_reg(r: i32, imm8: i32) {
    // psraw xmm, imm8
    psraw_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F71_6_reg(r: i32, imm8: i32) {
    // psllw xmm, imm8
    psllw_r128(r, imm8 as u64);
}
pub unsafe fn instr_0F72_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_0F72_4_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_0F72_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F72_2_reg(r: i32, imm8: i32) {
    // psrld mm, imm8
    psrld_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F72_4_reg(r: i32, imm8: i32) {
    // psrad mm, imm8
    psrad_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F72_6_reg(r: i32, imm8: i32) {
    // pslld mm, imm8
    pslld_r64(r, imm8 as u64);
}
pub unsafe fn instr_660F72_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F72_4_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F72_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F72_2_reg(r: i32, imm8: i32) {
    // psrld xmm, imm8
    psrld_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F72_4_reg(r: i32, imm8: i32) {
    // psrad xmm, imm8
    psrad_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F72_6_reg(r: i32, imm8: i32) {
    // pslld xmm, imm8
    pslld_r128(r, imm8 as u64);
}
pub unsafe fn instr_0F73_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_0F73_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0F73_2_reg(r: i32, imm8: i32) {
    // psrlq mm, imm8
    psrlq_r64(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_0F73_6_reg(r: i32, imm8: i32) {
    // psllq mm, imm8
    psllq_r64(r, imm8 as u64);
}
pub unsafe fn instr_660F73_2_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F73_3_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F73_6_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr_660F73_7_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660F73_2_reg(r: i32, imm8: i32) {
    // psrlq xmm, imm8
    psrlq_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F73_3_reg(r: i32, imm8: i32) {
    // psrldq xmm, imm8
    if imm8 == 0 {
        return;
    }
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    let shift = (if imm8 > 15 { 128 } else { imm8 << 3 }) as u32;
    if shift <= 63 {
        result.u64[0] = destination.u64[0] >> shift | destination.u64[1] << (64 - shift);
        result.u64[1] = destination.u64[1] >> shift
    }
    else if shift <= 127 {
        result.u64[0] = destination.u64[1] >> (shift - 64);
        result.u64[1] = 0
    }
    write_xmm_reg128(r, result);
}
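// Note on the byte shift above (illustrative): imm8 counts bytes, so it is
// converted to a bit count (imm8 << 3) and the 128-bit shift is decomposed
// into two 64-bit shifts, carrying bits from the high qword into the low one.
// E.g. `psrldq xmm, 4` becomes a 32-bit right shift across both qwords; the
// early return for imm8 == 0 avoids the undefined `<< 64` in the carry term.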
#[no_mangle]
pub unsafe fn instr_660F73_6_reg(r: i32, imm8: i32) {
    // psllq xmm, imm8
    psllq_r128(r, imm8 as u64);
}
#[no_mangle]
pub unsafe fn instr_660F73_7_reg(r: i32, imm8: i32) {
    // pslldq xmm, imm8
    if imm8 == 0 {
        return;
    }
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    let shift = (if imm8 > 15 { 128 } else { imm8 << 3 }) as u32;
    if shift <= 63 {
        result.u64[0] = destination.u64[0] << shift;
        result.u64[1] = destination.u64[1] << shift | destination.u64[0] >> (64 - shift)
    }
    else if shift <= 127 {
        result.u64[0] = 0;
        result.u64[1] = destination.u64[0] << (shift - 64)
    }
    write_xmm_reg128(r, result);
}
#[no_mangle]
pub unsafe fn instr_0F74(source: u64, r: i32) {
    // pcmpeqb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result: [u8; 8] = [0; 8];
    for i in 0..8 {
        result[i] = if destination[i] == source[i] { 255 } else { 0 };
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F74_reg(r1: i32, r2: i32) { instr_0F74(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F74_mem(addr: i32, r: i32) {
    instr_0F74(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F74(source: reg128, r: i32) {
    // pcmpeqb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = if source.u8[i] == destination.u8[i] { 255 } else { 0 }
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F74_reg(r1: i32, r2: i32) { instr_660F74(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F74_mem(addr: i32, r: i32) {
    instr_660F74(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F75(source: u64, r: i32) {
    // pcmpeqw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result: [u16; 4] = [0; 4];
    for i in 0..4 {
        result[i] = if destination[i] == source[i] { 0xFFFF } else { 0 };
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F75_reg(r1: i32, r2: i32) { instr_0F75(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F75_mem(addr: i32, r: i32) {
    instr_0F75(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F75(source: reg128, r: i32) {
    // pcmpeqw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = if source.u16[i] == destination.u16[i] { 0xFFFF } else { 0 };
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F75_reg(r1: i32, r2: i32) { instr_660F75(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F75_mem(addr: i32, r: i32) {
    instr_660F75(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F76(source: u64, r: i32) {
    // pcmpeqd mm, mm/m64
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = if destination[i] == source[i] { -1 } else { 0 }
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0F76_reg(r1: i32, r2: i32) { instr_0F76(read_mmx64s(r1), r2); }
pub unsafe fn instr_0F76_mem(addr: i32, r: i32) {
    instr_0F76(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660F76(source: reg128, r: i32) {
    // pcmpeqd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.i32[i] = if source.u32[i] == destination.u32[i] { -1 } else { 0 }
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660F76_reg(r1: i32, r2: i32) { instr_660F76(read_xmm128s(r1), r2); }
pub unsafe fn instr_660F76_mem(addr: i32, r: i32) {
    instr_660F76(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0F77() {
    // emms
    fpu_set_tag_word(0xFFFF);
}
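// Note on EMMS above: writing 0xFFFF sets all eight x87 tag-word fields to
// 11b (empty), which is how EMMS hands the FPU register stack back to x87
// code after MMX use.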
#[no_mangle]
pub unsafe fn instr_0F78() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F79() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7A() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7B() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7C() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7D() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0F7E(r: i32) -> i32 {
    // movd r/m32, mm
    let data = read_mmx64s(r);
    transition_fpu_to_mmx();
    return data as i32;
}
pub unsafe fn instr_0F7E_reg(r1: i32, r2: i32) { write_reg32(r1, instr_0F7E(r2)); }
pub unsafe fn instr_0F7E_mem(addr: i32, r: i32) {
    return_on_pagefault!(safe_write32(addr, instr_0F7E(r)));
}
pub unsafe fn instr_660F7E(r: i32) -> i32 {
    // movd r/m32, xmm
    let data = read_xmm64s(r);
    return data as i32;
}
pub unsafe fn instr_660F7E_reg(r1: i32, r2: i32) { write_reg32(r1, instr_660F7E(r2)); }
pub unsafe fn instr_660F7E_mem(addr: i32, r: i32) {
    return_on_pagefault!(safe_write32(addr, instr_660F7E(r)));
}
pub unsafe fn instr_F30F7E_mem(addr: i32, r: i32) {
    // movq xmm, xmm/mem64
    let data = return_on_pagefault!(safe_read64s(addr));
    write_xmm128_2(r, data, 0);
}
pub unsafe fn instr_F30F7E_reg(r1: i32, r2: i32) {
    // movq xmm, xmm/mem64
    write_xmm128_2(r2, read_xmm64s(r1), 0);
}
#[no_mangle]
pub unsafe fn instr_0F7F(r: i32) -> u64 {
    // movq mm/m64, mm
    transition_fpu_to_mmx();
    read_mmx64s(r)
}
pub unsafe fn instr_0F7F_mem(addr: i32, r: i32) {
    // movq mm/m64, mm
    mov_r_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0F7F_reg(r1: i32, r2: i32) {
    // movq mm/m64, mm
    write_mmx_reg64(r1, read_mmx64s(r2));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_660F7F_mem(addr: i32, r: i32) {
    // movdqa xmm/m128, xmm
    // XXX: Aligned write or #gp
    mov_r_m128(addr, r);
}
pub unsafe fn instr_660F7F_reg(r1: i32, r2: i32) {
    // movdqa xmm/m128, xmm
    // XXX: Aligned access or #gp
    mov_r_r128(r1, r2);
}
pub unsafe fn instr_F30F7F_mem(addr: i32, r: i32) {
    // movdqu xmm/m128, xmm
    mov_r_m128(addr, r);
}
pub unsafe fn instr_F30F7F_reg(r1: i32, r2: i32) {
    // movdqu xmm/m128, xmm
    mov_r_r128(r1, r2);
}
pub unsafe fn instr16_0F80(imm: i32) { jmpcc16(test_o(), imm); }
pub unsafe fn instr32_0F80(imm: i32) { jmpcc32(test_o(), imm); }
pub unsafe fn instr16_0F81(imm: i32) { jmpcc16(!test_o(), imm); }
pub unsafe fn instr32_0F81(imm: i32) { jmpcc32(!test_o(), imm); }
pub unsafe fn instr16_0F82(imm: i32) { jmpcc16(test_b(), imm); }
pub unsafe fn instr32_0F82(imm: i32) { jmpcc32(test_b(), imm); }
pub unsafe fn instr16_0F83(imm: i32) { jmpcc16(!test_b(), imm); }
pub unsafe fn instr32_0F83(imm: i32) { jmpcc32(!test_b(), imm); }
pub unsafe fn instr16_0F84(imm: i32) { jmpcc16(test_z(), imm); }
pub unsafe fn instr32_0F84(imm: i32) { jmpcc32(test_z(), imm); }
pub unsafe fn instr16_0F85(imm: i32) { jmpcc16(!test_z(), imm); }
pub unsafe fn instr32_0F85(imm: i32) { jmpcc32(!test_z(), imm); }
pub unsafe fn instr16_0F86(imm: i32) { jmpcc16(test_be(), imm); }
pub unsafe fn instr32_0F86(imm: i32) { jmpcc32(test_be(), imm); }
pub unsafe fn instr16_0F87(imm: i32) { jmpcc16(!test_be(), imm); }
pub unsafe fn instr32_0F87(imm: i32) { jmpcc32(!test_be(), imm); }
pub unsafe fn instr16_0F88(imm: i32) { jmpcc16(test_s(), imm); }
pub unsafe fn instr32_0F88(imm: i32) { jmpcc32(test_s(), imm); }
pub unsafe fn instr16_0F89(imm: i32) { jmpcc16(!test_s(), imm); }
pub unsafe fn instr32_0F89(imm: i32) { jmpcc32(!test_s(), imm); }
pub unsafe fn instr16_0F8A(imm: i32) { jmpcc16(test_p(), imm); }
pub unsafe fn instr32_0F8A(imm: i32) { jmpcc32(test_p(), imm); }
pub unsafe fn instr16_0F8B(imm: i32) { jmpcc16(!test_p(), imm); }
pub unsafe fn instr32_0F8B(imm: i32) { jmpcc32(!test_p(), imm); }
pub unsafe fn instr16_0F8C(imm: i32) { jmpcc16(test_l(), imm); }
pub unsafe fn instr32_0F8C(imm: i32) { jmpcc32(test_l(), imm); }
pub unsafe fn instr16_0F8D(imm: i32) { jmpcc16(!test_l(), imm); }
pub unsafe fn instr32_0F8D(imm: i32) { jmpcc32(!test_l(), imm); }
pub unsafe fn instr16_0F8E(imm: i32) { jmpcc16(test_le(), imm); }
pub unsafe fn instr32_0F8E(imm: i32) { jmpcc32(test_le(), imm); }
pub unsafe fn instr16_0F8F(imm: i32) { jmpcc16(!test_le(), imm); }
pub unsafe fn instr32_0F8F(imm: i32) { jmpcc32(!test_le(), imm); }
pub unsafe fn instr_0F90_reg(r: i32, _: i32) { setcc_reg(test_o(), r); }
pub unsafe fn instr_0F91_reg(r: i32, _: i32) { setcc_reg(!test_o(), r); }
pub unsafe fn instr_0F92_reg(r: i32, _: i32) { setcc_reg(test_b(), r); }
pub unsafe fn instr_0F93_reg(r: i32, _: i32) { setcc_reg(!test_b(), r); }
pub unsafe fn instr_0F94_reg(r: i32, _: i32) { setcc_reg(test_z(), r); }
pub unsafe fn instr_0F95_reg(r: i32, _: i32) { setcc_reg(!test_z(), r); }
pub unsafe fn instr_0F96_reg(r: i32, _: i32) { setcc_reg(test_be(), r); }
pub unsafe fn instr_0F97_reg(r: i32, _: i32) { setcc_reg(!test_be(), r); }
pub unsafe fn instr_0F98_reg(r: i32, _: i32) { setcc_reg(test_s(), r); }
pub unsafe fn instr_0F99_reg(r: i32, _: i32) { setcc_reg(!test_s(), r); }
pub unsafe fn instr_0F9A_reg(r: i32, _: i32) { setcc_reg(test_p(), r); }
pub unsafe fn instr_0F9B_reg(r: i32, _: i32) { setcc_reg(!test_p(), r); }
pub unsafe fn instr_0F9C_reg(r: i32, _: i32) { setcc_reg(test_l(), r); }
pub unsafe fn instr_0F9D_reg(r: i32, _: i32) { setcc_reg(!test_l(), r); }
pub unsafe fn instr_0F9E_reg(r: i32, _: i32) { setcc_reg(test_le(), r); }
pub unsafe fn instr_0F9F_reg(r: i32, _: i32) { setcc_reg(!test_le(), r); }
pub unsafe fn instr_0F90_mem(addr: i32, _: i32) { setcc_mem(test_o(), addr); }
pub unsafe fn instr_0F91_mem(addr: i32, _: i32) { setcc_mem(!test_o(), addr); }
pub unsafe fn instr_0F92_mem(addr: i32, _: i32) { setcc_mem(test_b(), addr); }
pub unsafe fn instr_0F93_mem(addr: i32, _: i32) { setcc_mem(!test_b(), addr); }
pub unsafe fn instr_0F94_mem(addr: i32, _: i32) { setcc_mem(test_z(), addr); }
pub unsafe fn instr_0F95_mem(addr: i32, _: i32) { setcc_mem(!test_z(), addr); }
pub unsafe fn instr_0F96_mem(addr: i32, _: i32) { setcc_mem(test_be(), addr); }
pub unsafe fn instr_0F97_mem(addr: i32, _: i32) { setcc_mem(!test_be(), addr); }
pub unsafe fn instr_0F98_mem(addr: i32, _: i32) { setcc_mem(test_s(), addr); }
pub unsafe fn instr_0F99_mem(addr: i32, _: i32) { setcc_mem(!test_s(), addr); }
pub unsafe fn instr_0F9A_mem(addr: i32, _: i32) { setcc_mem(test_p(), addr); }
pub unsafe fn instr_0F9B_mem(addr: i32, _: i32) { setcc_mem(!test_p(), addr); }
pub unsafe fn instr_0F9C_mem(addr: i32, _: i32) { setcc_mem(test_l(), addr); }
pub unsafe fn instr_0F9D_mem(addr: i32, _: i32) { setcc_mem(!test_l(), addr); }
pub unsafe fn instr_0F9E_mem(addr: i32, _: i32) { setcc_mem(test_le(), addr); }
pub unsafe fn instr_0F9F_mem(addr: i32, _: i32) { setcc_mem(!test_le(), addr); }
pub unsafe fn instr16_0FA0() {
    return_on_pagefault!(push16(*sreg.offset(FS as isize) as i32));
}
pub unsafe fn instr32_0FA0() { return_on_pagefault!(push32_sreg(FS)) }
#[no_mangle]
pub unsafe fn instr16_0FA1() {
    // pop fs
    if switch_seg(FS, return_on_pagefault!(safe_read16(get_stack_pointer(0)))) {
        adjust_stack_reg(2);
    }
}
#[no_mangle]
pub unsafe fn instr32_0FA1() {
    // pop fs
    if switch_seg(
        FS,
        return_on_pagefault!(safe_read32s(get_stack_pointer(0))) & 0xFFFF,
    ) {
        adjust_stack_reg(4);
    }
}
#[no_mangle]
pub unsafe fn instr_0FA2() {
    // cpuid
    // TODO: Fill in with less bogus values
    // http://lxr.linux.no/linux+%2a/arch/x86/include/asm/cpufeature.h
    // http://www.sandpile.org/x86/cpuid.htm
    let mut eax = 0;
    let mut ecx = 0;
    let mut edx = 0;
    let mut ebx = 0;
    let level = read_reg32(EAX) as u32;
    match level {
        0 => {
            // maximum supported level (default 0x16, overwritten to 2 as a workaround for Windows NT)
            eax = cpuid_level as i32;
            ebx = 0x756E6547; // Genu
            edx = 0x49656E69; // ineI
            ecx = 0x6C65746E; // ntel
        },
        1 => {
            // pentium
            eax = 3 | 6 << 4 | 15 << 8; // stepping, model, family
            ebx = 1 << 16 | 8 << 8; // cpu count, clflush size
            ecx = 1 << 23 | 1 << 30; // popcnt, rdrand
            let vme = 0 << 1;
            if ::config::VMWARE_HYPERVISOR_PORT {
                ecx |= 1 << 31
            }; // hypervisor
            edx = (if true /* have fpu */ { 1 } else { 0 }) | // fpu
                vme | 1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | // vme, pse, tsc, msr, pae
                1 << 8 | 1 << 11 | 1 << 13 | 1 << 15 | // cx8, sep, pge, cmov
                1 << 23 | 1 << 24 | 1 << 25 | 1 << 26; // mmx, fxsr, sse1, sse2
            if *acpi_enabled
            //&& this.apic_enabled[0])
            {
                edx |= 1 << 9; // apic
            }
        },
        2 => {
            // Taken from http://siyobik.info.gf/main/reference/instruction/CPUID
            eax = 0x665B5001;
            ebx = 0;
            ecx = 0;
            edx = 0x007A7000;
        },
        4 => {
            // from my local machine
            match read_reg32(ECX) {
                0 => {
                    eax = 0x00000121;
                    ebx = 0x01c0003f;
                    ecx = 0x0000003f;
                    edx = 0x00000001;
                },
                1 => {
                    eax = 0x00000122;
                    ebx = 0x01c0003f;
                    ecx = 0x0000003f;
                    edx = 0x00000001;
                },
                2 => {
                    eax = 0x00000143;
                    ebx = 0x05c0003f;
                    ecx = 0x00000fff;
                    edx = 0x00000001;
                },
                _ => {},
            }
        },
        5 => {
            // from my local machine
            eax = 0x40;
            ebx = 0x40;
            ecx = 3;
            edx = 0x00142120;
        },
        7 => {
            if read_reg32(ECX) == 0 {
                eax = 0; // maximum supported sub-level
                ebx = 1 << 9; // enhanced REP MOVSB/STOSB
                ecx = 0;
                edx = 0;
            }
        },
        0x80000000 => {
            // maximum supported extended level
            eax = 5;
            // other registers are reserved
        },
        0x40000000 => {
            // hypervisor
            if ::config::VMWARE_HYPERVISOR_PORT {
                // h("Ware".split("").reduce((a, c, i) => a | c.charCodeAt(0) << i * 8, 0))
                ebx = 0x61774D56; // VMwa
                ecx = 0x4D566572; // reVM
                edx = 0x65726177; // ware
            }
        },
        0x15 => {
            eax = 1; // denominator
            ebx = 1; // numerator
            ecx = (TSC_RATE * 1000.0) as u32 as i32; // core crystal clock frequency in Hz
            dbg_assert!(ecx > 0);
            // (TSC frequency = core crystal clock frequency * EBX/EAX)
        },
        0x16 => {
            eax = (TSC_RATE / 1000.0).floor() as u32 as i32; // core base frequency in MHz
            ebx = (TSC_RATE / 1000.0).floor() as u32 as i32; // core maximum frequency in MHz
            ecx = 10; // bus (reference) frequency in MHz
            // 16-bit values
            dbg_assert!(eax < 0x10000);
            dbg_assert!(ebx < 0x10000);
            dbg_assert!(ecx < 0x10000);
        },
        x => {
            dbg_log!("cpuid: unimplemented eax: {:x}", x);
        },
    }
    if level == 4 || level == 7 {
        dbg_log!(
            "cpuid: eax={:08x} ecx={:02x}",
            read_reg32(EAX),
            read_reg32(ECX),
        );
    }
    else if level != 0 && level != 2 && level != 0x80000000 {
        dbg_log!("cpuid: eax={:08x}", read_reg32(EAX));
    }
    write_reg32(EAX, eax);
    write_reg32(ECX, ecx);
    write_reg32(EDX, edx);
    write_reg32(EBX, ebx);
}
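// Illustrative call sequence for instr_0FA2 above (values depend on the leaf
// tables there): a guest executing `mov eax, 0` / `cpuid` receives the vendor
// string "GenuineIntel" split across EBX ("Genu"), EDX ("ineI") and ECX
// ("ntel"), with EAX holding the maximum supported leaf.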
pub unsafe fn instr16_0FA3_reg(r1: i32, r2: i32) { bt_reg(read_reg16(r1), read_reg16(r2) & 15); }
pub unsafe fn instr16_0FA3_mem(addr: i32, r: i32) { bt_mem(addr, read_reg16(r) << 16 >> 16); }
pub unsafe fn instr32_0FA3_reg(r1: i32, r2: i32) { bt_reg(read_reg32(r1), read_reg32(r2) & 31); }
pub unsafe fn instr32_0FA3_mem(addr: i32, r: i32) { bt_mem(addr, read_reg32(r)); }
pub unsafe fn instr16_0FA4_mem(addr: i32, r: i32, imm: i32) {
    safe_read_write16(addr, &|x| shld16(x, read_reg16(r), imm & 31))
}
pub unsafe fn instr16_0FA4_reg(r1: i32, r: i32, imm: i32) {
    write_reg16(r1, shld16(read_reg16(r1), read_reg16(r), imm & 31));
}
pub unsafe fn instr32_0FA4_mem(addr: i32, r: i32, imm: i32) {
    safe_read_write32(addr, &|x| shld32(x, read_reg32(r), imm & 31))
}
pub unsafe fn instr32_0FA4_reg(r1: i32, r: i32, imm: i32) {
    write_reg32(r1, shld32(read_reg32(r1), read_reg32(r), imm & 31));
}
pub unsafe fn instr16_0FA5_mem(addr: i32, r: i32) {
    safe_read_write16(addr, &|x| shld16(x, read_reg16(r), read_reg8(CL) & 31))
}
pub unsafe fn instr16_0FA5_reg(r1: i32, r: i32) {
    write_reg16(
        r1,
        shld16(read_reg16(r1), read_reg16(r), read_reg8(CL) & 31),
    );
}
pub unsafe fn instr32_0FA5_mem(addr: i32, r: i32) {
    safe_read_write32(addr, &|x| shld32(x, read_reg32(r), read_reg8(CL) & 31))
}
pub unsafe fn instr32_0FA5_reg(r1: i32, r: i32) {
    write_reg32(
        r1,
        shld32(read_reg32(r1), read_reg32(r), read_reg8(CL) & 31),
    );
}
#[no_mangle]
pub unsafe fn instr_0FA6() {
    // obsolete cmpxchg (os/2)
    trigger_ud();
}
#[no_mangle]
pub unsafe fn instr_0FA7() { undefined_instruction(); }
pub unsafe fn instr16_0FA8() {
    return_on_pagefault!(push16(*sreg.offset(GS as isize) as i32));
}
pub unsafe fn instr32_0FA8() { return_on_pagefault!(push32_sreg(GS)) }
#[no_mangle]
pub unsafe fn instr16_0FA9() {
    // pop gs
    if switch_seg(GS, return_on_pagefault!(safe_read16(get_stack_pointer(0)))) {
        adjust_stack_reg(2);
    }
}
#[no_mangle]
pub unsafe fn instr32_0FA9() {
    // pop gs
    if switch_seg(
        GS,
        return_on_pagefault!(safe_read32s(get_stack_pointer(0))) & 0xFFFF,
    ) {
        adjust_stack_reg(4);
    }
}
#[no_mangle]
pub unsafe fn instr_0FAA() {
    // rsm
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr16_0FAB_reg(r1: i32, r2: i32) {
    write_reg16(r1, bts_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FAB_mem(addr: i32, r: i32) { bts_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FAB_reg(r1: i32, r2: i32) {
    write_reg32(r1, bts_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FAB_mem(addr: i32, r: i32) { bts_mem(addr, read_reg32(r)); }
pub unsafe fn instr16_0FAC_mem(addr: i32, r: i32, imm: i32) {
    safe_read_write16(addr, &|x| shrd16(x, read_reg16(r), imm & 31))
}
pub unsafe fn instr16_0FAC_reg(r1: i32, r: i32, imm: i32) {
    write_reg16(r1, shrd16(read_reg16(r1), read_reg16(r), imm & 31));
}
pub unsafe fn instr32_0FAC_mem(addr: i32, r: i32, imm: i32) {
    safe_read_write32(addr, &|x| shrd32(x, read_reg32(r), imm & 31))
}
pub unsafe fn instr32_0FAC_reg(r1: i32, r: i32, imm: i32) {
    write_reg32(r1, shrd32(read_reg32(r1), read_reg32(r), imm & 31));
}
pub unsafe fn instr16_0FAD_mem(addr: i32, r: i32) {
    safe_read_write16(addr, &|x| shrd16(x, read_reg16(r), read_reg8(CL) & 31))
}
pub unsafe fn instr16_0FAD_reg(r1: i32, r: i32) {
    write_reg16(
        r1,
        shrd16(read_reg16(r1), read_reg16(r), read_reg8(CL) & 31),
    );
}
pub unsafe fn instr32_0FAD_mem(addr: i32, r: i32) {
    safe_read_write32(addr, &|x| shrd32(x, read_reg32(r), read_reg8(CL) & 31))
}
pub unsafe fn instr32_0FAD_reg(r1: i32, r: i32) {
    write_reg32(
        r1,
        shrd32(read_reg32(r1), read_reg32(r), read_reg8(CL) & 31),
    );
}
#[no_mangle]
pub unsafe fn instr_0FAE_0_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_0_mem(addr: i32) { fxsave(addr); }
#[no_mangle]
pub unsafe fn instr_0FAE_1_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_1_mem(addr: i32) { fxrstor(addr); }
#[no_mangle]
pub unsafe fn instr_0FAE_2_reg(_r: i32) { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FAE_2_mem(addr: i32) {
    // ldmxcsr
    let new_mxcsr = return_on_pagefault!(safe_read32s(addr));
    if 0 != new_mxcsr & !MXCSR_MASK {
        dbg_log!("Invalid mxcsr bits: {:x}", new_mxcsr & !MXCSR_MASK);
        trigger_gp(0);
        return;
    }
    set_mxcsr(new_mxcsr);
}
#[no_mangle]
pub unsafe fn instr_0FAE_3_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_3_mem(addr: i32) {
    // stmxcsr
    return_on_pagefault!(safe_write32(addr, *mxcsr));
}
#[no_mangle]
pub unsafe fn instr_0FAE_4_reg(_r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FAE_4_mem(_addr: i32) {
    // xsave
    undefined_instruction();
}
pub unsafe fn instr_0FAE_5_reg(_r: i32) {
    // lfence
}
pub unsafe fn instr_0FAE_5_mem(_addr: i32) {
    // xrstor
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0FAE_6_reg(_r: i32) {
    // mfence
}
#[no_mangle]
pub unsafe fn instr_0FAE_6_mem(_addr: i32) {
    // xsaveopt
    undefined_instruction();
}
#[no_mangle]
pub unsafe fn instr_0FAE_7_reg(_r: i32) {
    // sfence
}
#[no_mangle]
pub unsafe fn instr_0FAE_7_mem(_addr: i32) {
    // clflush
    undefined_instruction();
}
pub unsafe fn instr16_0FAF_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        imul_reg16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
pub unsafe fn instr16_0FAF_reg(r1: i32, r: i32) {
    write_reg16(r, imul_reg16(read_reg16(r), read_reg16(r1)));
}
pub unsafe fn instr32_0FAF_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        imul_reg32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
pub unsafe fn instr32_0FAF_reg(r1: i32, r: i32) {
    write_reg32(r, imul_reg32(read_reg32(r), read_reg32(r1)));
}
#[no_mangle]
pub unsafe fn instr_0FB0_reg(r1: i32, r2: i32) { write_reg8(r1, cmpxchg8(read_reg8(r1), r2)); }
#[no_mangle]
pub unsafe fn instr_0FB0_mem(addr: i32, r: i32) { safe_read_write8(addr, &|x| cmpxchg8(x, r)) }
pub unsafe fn instr16_0FB1_reg(r1: i32, r2: i32) { write_reg16(r1, cmpxchg16(read_reg16(r1), r2)); }
pub unsafe fn instr16_0FB1_mem(addr: i32, r: i32) { safe_read_write16(addr, &|x| cmpxchg16(x, r)) }
pub unsafe fn instr32_0FB1_reg(r1: i32, r2: i32) { write_reg32(r1, cmpxchg32(read_reg32(r1), r2)); }
pub unsafe fn instr32_0FB1_mem(addr: i32, r: i32) { safe_read_write32(addr, &|x| cmpxchg32(x, r)) }
#[no_mangle]
pub unsafe fn instr16_0FB2_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB2_mem(addr: i32, r: i32) { lss16(addr, r, SS); }
#[no_mangle]
pub unsafe fn instr32_0FB2_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB2_mem(addr: i32, r: i32) { lss32(addr, r, SS); }
#[no_mangle]
pub unsafe fn instr16_0FB3_reg(r1: i32, r2: i32) {
    write_reg16(r1, btr_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FB3_mem(addr: i32, r: i32) { btr_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FB3_reg(r1: i32, r2: i32) {
    write_reg32(r1, btr_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FB3_mem(addr: i32, r: i32) { btr_mem(addr, read_reg32(r)); }
#[no_mangle]
pub unsafe fn instr16_0FB4_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB4_mem(addr: i32, r: i32) { lss16(addr, r, FS); }
#[no_mangle]
pub unsafe fn instr32_0FB4_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB4_mem(addr: i32, r: i32) { lss32(addr, r, FS); }
#[no_mangle]
pub unsafe fn instr16_0FB5_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB5_mem(addr: i32, r: i32) { lss16(addr, r, GS); }
#[no_mangle]
pub unsafe fn instr32_0FB5_reg(_unused: i32, _unused2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB5_mem(addr: i32, r: i32) { lss32(addr, r, GS); }
pub unsafe fn instr16_0FB6_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read8(addr)));
}
pub unsafe fn instr16_0FB6_reg(r1: i32, r: i32) { write_reg16(r, read_reg8(r1)); }
pub unsafe fn instr32_0FB6_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read8(addr)));
}
pub unsafe fn instr32_0FB6_reg(r1: i32, r: i32) { write_reg32(r, read_reg8(r1)); }
pub unsafe fn instr16_0FB7_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read16(addr)));
}
pub unsafe fn instr16_0FB7_reg(r1: i32, r: i32) { write_reg16(r, read_reg16(r1)); }
pub unsafe fn instr32_0FB7_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read16(addr)));
}
pub unsafe fn instr32_0FB7_reg(r1: i32, r: i32) { write_reg32(r, read_reg16(r1)); }
#[no_mangle]
pub unsafe fn instr16_0FB8_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr16_0FB8_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr16_F30FB8_mem(addr: i32, r: i32) {
    write_reg16(r, popcnt(return_on_pagefault!(safe_read16(addr))));
}
pub unsafe fn instr16_F30FB8_reg(r1: i32, r: i32) { write_reg16(r, popcnt(read_reg16(r1))); }
#[no_mangle]
pub unsafe fn instr32_0FB8_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FB8_mem(_addr: i32, _r: i32) { trigger_ud(); }
pub unsafe fn instr32_F30FB8_mem(addr: i32, r: i32) {
    write_reg32(r, popcnt(return_on_pagefault!(safe_read32s(addr))));
}
pub unsafe fn instr32_F30FB8_reg(r1: i32, r: i32) { write_reg32(r, popcnt(read_reg32(r1))); }
#[no_mangle]
pub unsafe fn instr_0FB9() {
    // UD2
    trigger_ud();
}
pub unsafe fn instr16_0FBA_4_reg(r: i32, imm: i32) { bt_reg(read_reg16(r), imm & 15); }
pub unsafe fn instr16_0FBA_4_mem(addr: i32, imm: i32) { bt_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_5_reg(r: i32, imm: i32) {
    write_reg16(r, bts_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_5_mem(addr: i32, imm: i32) { bts_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_6_reg(r: i32, imm: i32) {
    write_reg16(r, btr_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_6_mem(addr: i32, imm: i32) { btr_mem(addr, imm & 15); }
#[no_mangle]
pub unsafe fn instr16_0FBA_7_reg(r: i32, imm: i32) {
    write_reg16(r, btc_reg(read_reg16(r), imm & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBA_7_mem(addr: i32, imm: i32) { btc_mem(addr, imm & 15); }
pub unsafe fn instr32_0FBA_4_reg(r: i32, imm: i32) { bt_reg(read_reg32(r), imm & 31); }
pub unsafe fn instr32_0FBA_4_mem(addr: i32, imm: i32) { bt_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_5_reg(r: i32, imm: i32) {
    write_reg32(r, bts_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_5_mem(addr: i32, imm: i32) { bts_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_6_reg(r: i32, imm: i32) {
    write_reg32(r, btr_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_6_mem(addr: i32, imm: i32) { btr_mem(addr, imm & 31); }
#[no_mangle]
pub unsafe fn instr32_0FBA_7_reg(r: i32, imm: i32) {
    write_reg32(r, btc_reg(read_reg32(r), imm & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBA_7_mem(addr: i32, imm: i32) { btc_mem(addr, imm & 31); }
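// Note: 0F BA is opcode group 8; the /4../7 variants above implement
// bt/bts/btr/btc with an immediate bit offset, masked to the operand width
// (& 15 for 16-bit, & 31 for 32-bit operands).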
#[no_mangle]
pub unsafe fn instr16_0FBB_reg(r1: i32, r2: i32) {
    write_reg16(r1, btc_reg(read_reg16(r1), read_reg16(r2) & 15));
}
#[no_mangle]
pub unsafe fn instr16_0FBB_mem(addr: i32, r: i32) { btc_mem(addr, read_reg16(r) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr32_0FBB_reg(r1: i32, r2: i32) {
    write_reg32(r1, btc_reg(read_reg32(r1), read_reg32(r2) & 31));
}
#[no_mangle]
pub unsafe fn instr32_0FBB_mem(addr: i32, r: i32) { btc_mem(addr, read_reg32(r)); }
pub unsafe fn instr16_0FBC_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        bsf16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
pub unsafe fn instr16_0FBC_reg(r1: i32, r: i32) {
    write_reg16(r, bsf16(read_reg16(r), read_reg16(r1)));
}
pub unsafe fn instr32_0FBC_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        bsf32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
pub unsafe fn instr32_0FBC_reg(r1: i32, r: i32) {
    write_reg32(r, bsf32(read_reg32(r), read_reg32(r1)));
}
pub unsafe fn instr16_0FBD_mem(addr: i32, r: i32) {
    write_reg16(
        r,
        bsr16(read_reg16(r), return_on_pagefault!(safe_read16(addr))),
    );
}
pub unsafe fn instr16_0FBD_reg(r1: i32, r: i32) {
    write_reg16(r, bsr16(read_reg16(r), read_reg16(r1)));
}
pub unsafe fn instr32_0FBD_mem(addr: i32, r: i32) {
    write_reg32(
        r,
        bsr32(read_reg32(r), return_on_pagefault!(safe_read32s(addr))),
    );
}
pub unsafe fn instr32_0FBD_reg(r1: i32, r: i32) {
    write_reg32(r, bsr32(read_reg32(r), read_reg32(r1)));
}
pub unsafe fn instr16_0FBE_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read8(addr)) << 24 >> 24);
}
pub unsafe fn instr16_0FBE_reg(r1: i32, r: i32) { write_reg16(r, read_reg8(r1) << 24 >> 24); }
pub unsafe fn instr32_0FBE_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read8(addr)) << 24 >> 24);
}
pub unsafe fn instr32_0FBE_reg(r1: i32, r: i32) { write_reg32(r, read_reg8(r1) << 24 >> 24); }
pub unsafe fn instr16_0FBF_mem(addr: i32, r: i32) {
    write_reg16(r, return_on_pagefault!(safe_read16(addr)) << 16 >> 16);
}
pub unsafe fn instr16_0FBF_reg(r1: i32, r: i32) { write_reg16(r, read_reg16(r1) << 16 >> 16); }
pub unsafe fn instr32_0FBF_mem(addr: i32, r: i32) {
    write_reg32(r, return_on_pagefault!(safe_read16(addr)) << 16 >> 16);
}
pub unsafe fn instr32_0FBF_reg(r1: i32, r: i32) { write_reg32(r, read_reg16(r1) << 16 >> 16); }
#[no_mangle]
pub unsafe fn instr_0FC0_mem(addr: i32, r: i32) { safe_read_write8(addr, &|x| xadd8(x, r)) }
#[no_mangle]
pub unsafe fn instr_0FC0_reg(r1: i32, r: i32) { write_reg8(r1, xadd8(read_reg8(r1), r)); }
pub unsafe fn instr16_0FC1_mem(addr: i32, r: i32) { safe_read_write16(addr, &|x| xadd16(x, r)) }
pub unsafe fn instr16_0FC1_reg(r1: i32, r: i32) { write_reg16(r1, xadd16(read_reg16(r1), r)); }
pub unsafe fn instr32_0FC1_mem(addr: i32, r: i32) { safe_read_write32(addr, &|x| xadd32(x, r)) }
pub unsafe fn instr32_0FC1_reg(r1: i32, r: i32) { write_reg32(r1, xadd32(read_reg32(r1), r)); }
#[no_mangle]
pub unsafe fn instr_0FC2(source: reg128, r: i32, imm8: i32) {
    // cmpps xmm, xmm/m128
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.i32[i] = if sse_comparison(imm8, destination.f32[i] as f64, source.f32[i] as f64) {
            -1
        }
        else {
            0
        };
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_0FC2_reg(r1: i32, r2: i32, imm: i32) { instr_0FC2(read_xmm128s(r1), r2, imm); }
pub unsafe fn instr_0FC2_mem(addr: i32, r: i32, imm: i32) {
    instr_0FC2(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_660FC2(source: reg128, r: i32, imm8: i32) {
    // cmppd xmm, xmm/m128
    let destination = read_xmm128s(r);
    let result = reg128 {
        i64: [
            (if sse_comparison(imm8, destination.f64[0], source.f64[0]) { -1 } else { 0 }) as i64,
            (if sse_comparison(imm8, destination.f64[1], source.f64[1]) { -1 } else { 0 }) as i64,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FC2_reg(r1: i32, r2: i32, imm: i32) {
    instr_660FC2(read_xmm128s(r1), r2, imm);
}
pub unsafe fn instr_660FC2_mem(addr: i32, r: i32, imm: i32) {
    instr_660FC2(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F20FC2(source: u64, r: i32, imm8: i32) {
    // cmpsd xmm, xmm/m64
    let destination = read_xmm64s(r);
    write_xmm64(
        r,
        if sse_comparison(imm8, f64::from_bits(destination), f64::from_bits(source)) {
            (-1i32) as u64
        }
        else {
            0
        },
    );
}
pub unsafe fn instr_F20FC2_reg(r1: i32, r2: i32, imm: i32) {
    instr_F20FC2(read_xmm64s(r1), r2, imm);
}
pub unsafe fn instr_F20FC2_mem(addr: i32, r: i32, imm: i32) {
    instr_F20FC2(return_on_pagefault!(safe_read64s(addr)), r, imm);
}
#[no_mangle]
pub unsafe fn instr_F30FC2(source: i32, r: i32, imm8: i32) {
    // cmpss xmm, xmm/m32
    let destination = read_xmm_f32(r);
    let source: f32 = std::mem::transmute(source);
    let result = if sse_comparison(imm8, destination as f64, source as f64) { -1 } else { 0 };
    write_xmm32(r, result);
}
pub unsafe fn instr_F30FC2_reg(r1: i32, r2: i32, imm: i32) {
    instr_F30FC2(read_xmm64s(r1) as i32, r2, imm);
}
pub unsafe fn instr_F30FC2_mem(addr: i32, r: i32, imm: i32) {
    instr_F30FC2(return_on_pagefault!(safe_read32s(addr)), r, imm);
}
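// Predicate encoding consumed by sse_comparison (defined elsewhere) in the
// cmpps/cmppd/cmpsd/cmpss handlers above; these are the standard imm8 values:
//   0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle, 7 = ord
// A true predicate produces an all-ones element (-1), a false one all zeros.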
pub unsafe fn instr_0FC3_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_0FC3_mem(addr: i32, r: i32) {
    // movnti
    return_on_pagefault!(safe_write32(addr, read_reg32(r)));
}
#[no_mangle]
pub unsafe fn instr_0FC4(source: i32, r: i32, imm8: i32) {
    // pinsrw mm, r32/m16, imm8
    let mut destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    destination[(imm8 & 3) as usize] = source as u16;
    write_mmx_reg64(r, std::mem::transmute(destination));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FC4_reg(r1: i32, r2: i32, imm: i32) { instr_0FC4(read_reg32(r1), r2, imm); }
pub unsafe fn instr_0FC4_mem(addr: i32, r: i32, imm: i32) {
    instr_0FC4(return_on_pagefault!(safe_read16(addr)), r, imm);
}
pub unsafe fn instr_660FC4(source: i32, r: i32, imm8: i32) {
    // pinsrw xmm, r32/m16, imm8
    let mut destination = read_xmm128s(r);
    let index = (imm8 & 7) as u32;
    destination.u16[index as usize] = (source & 0xFFFF) as u16;
    write_xmm_reg128(r, destination);
}
pub unsafe fn instr_660FC4_reg(r1: i32, r2: i32, imm: i32) {
    instr_660FC4(read_reg32(r1), r2, imm);
}
pub unsafe fn instr_660FC4_mem(addr: i32, r: i32, imm: i32) {
    instr_660FC4(return_on_pagefault!(safe_read16(addr)), r, imm);
}
pub unsafe fn instr_0FC5_mem(_addr: i32, _r: i32, _imm8: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FC5_reg(r1: i32, r2: i32, imm8: i32) {
    // pextrw r32, mm, imm8
    let data: [u16; 4] = std::mem::transmute(read_mmx64s(r1));
    write_reg32(r2, data[(imm8 & 3) as usize] as i32);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_660FC5_mem(_addr: i32, _r: i32, _imm8: i32) { trigger_ud(); }
pub unsafe fn instr_660FC5_reg(r1: i32, r2: i32, imm8: i32) {
    // pextrw r32, xmm, imm8
    let data = read_xmm128s(r1);
    let index = (imm8 & 7) as u32;
    let result = data.u16[index as usize] as u32;
    write_reg32(r2, result as i32);
}
#[no_mangle]
pub unsafe fn instr_0FC6(source: reg128, r: i32, imm8: i32) {
    // shufps xmm, xmm/mem128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.u32[(imm8 & 3) as usize] as i32,
        destination.u32[(imm8 >> 2 & 3) as usize] as i32,
        source.u32[(imm8 >> 4 & 3) as usize] as i32,
        source.u32[(imm8 >> 6 & 3) as usize] as i32,
    );
}
pub unsafe fn instr_0FC6_reg(r1: i32, r2: i32, imm: i32) { instr_0FC6(read_xmm128s(r1), r2, imm); }
pub unsafe fn instr_0FC6_mem(addr: i32, r: i32, imm: i32) {
    instr_0FC6(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
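// Illustrative example for shufps above: imm8 = 0x4E (0b01_00_11_10) yields
// [destination[2], destination[3], source[0], source[1]]; the low two result
// dwords are always selected from the destination, the high two from the
// source.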
#[no_mangle]
pub unsafe fn instr_660FC6(source: reg128, r: i32, imm8: i32) {
    // shufpd xmm, xmm/mem128
    let destination = read_xmm128s(r);
    let result = reg128 {
        i64: [
            destination.i64[imm8 as usize & 1],
            source.i64[imm8 as usize >> 1 & 1],
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FC6_reg(r1: i32, r2: i32, imm: i32) {
    instr_660FC6(read_xmm128s(r1), r2, imm);
}
pub unsafe fn instr_660FC6_mem(addr: i32, r: i32, imm: i32) {
    instr_660FC6(return_on_pagefault!(safe_read128s(addr)), r, imm);
}
pub unsafe fn instr16_0FC7_1_reg(_r: i32) { trigger_ud(); }
pub unsafe fn instr32_0FC7_1_reg(_r: i32) { trigger_ud(); }
pub unsafe fn instr16_0FC7_1_mem(addr: i32) {
    // cmpxchg8b
    return_on_pagefault!(writable_or_pagefault(addr, 8));
    let m64 = safe_read64s(addr).unwrap();
    let m64_low = m64 as i32;
    let m64_high = (m64 >> 32) as i32;
    if read_reg32(EAX) == m64_low && read_reg32(EDX) == m64_high {
        *flags |= FLAG_ZERO;
        safe_write64(
            addr,
            read_reg32(EBX) as u32 as u64 | (read_reg32(ECX) as u32 as u64) << 32,
        )
        .unwrap();
    }
    else {
        *flags &= !FLAG_ZERO;
        write_reg32(EAX, m64_low);
        write_reg32(EDX, m64_high);
    }
    *flags_changed &= !FLAG_ZERO;
}
pub unsafe fn instr32_0FC7_1_mem(addr: i32) { instr16_0FC7_1_mem(addr) }
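// cmpxchg8b semantics implemented above: compare EDX:EAX with the 64-bit
// memory operand; on a match set ZF and store ECX:EBX, otherwise clear ZF and
// load the operand into EDX:EAX. The writable_or_pagefault check up front is
// what makes the later unwrap()s safe.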
#[no_mangle]
pub unsafe fn instr16_0FC7_6_reg(r: i32) {
    // rdrand
    let rand = get_rand_int();
    write_reg16(r, rand);
    *flags &= !FLAGS_ALL;
    *flags |= 1;
    *flags_changed = 0;
}
#[no_mangle]
pub unsafe fn instr32_0FC7_6_reg(r: i32) {
    // rdrand
    let rand = get_rand_int();
    write_reg32(r, rand);
    *flags &= !FLAGS_ALL;
    *flags |= 1;
    *flags_changed = 0;
}
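// Note on rdrand above: `*flags |= 1` sets CF, which software checks to see
// whether rdrand returned a valid value; this emulation always reports
// success.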
#[no_mangle]
pub unsafe fn instr16_0FC7_6_mem(_addr: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr32_0FC7_6_mem(_addr: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FC8() { bswap(EAX); }
#[no_mangle]
pub unsafe fn instr_0FC9() { bswap(ECX); }
#[no_mangle]
pub unsafe fn instr_0FCA() { bswap(EDX); }
#[no_mangle]
pub unsafe fn instr_0FCB() { bswap(EBX); }
#[no_mangle]
pub unsafe fn instr_0FCC() { bswap(ESP); }
#[no_mangle]
pub unsafe fn instr_0FCD() { bswap(EBP); }
#[no_mangle]
pub unsafe fn instr_0FCE() { bswap(ESI); }
#[no_mangle]
pub unsafe fn instr_0FCF() { bswap(EDI); }
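// Example: bswap reverses the byte order of a 32-bit register, e.g.
// EAX = 0x12345678 becomes 0x78563412 after instr_0FC8.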
#[no_mangle]
pub unsafe fn instr_0FD0() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FD1(source: u64, r: i32) {
    // psrlw mm, mm/m64
    psrlw_r64(r, source);
}
pub unsafe fn instr_0FD1_reg(r1: i32, r2: i32) { instr_0FD1(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD1_mem(addr: i32, r: i32) {
    instr_0FD1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD1(source: reg128, r: i32) {
    // psrlw xmm, xmm/m128
    // XXX: Aligned access or #gp
    psrlw_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FD1_reg(r1: i32, r2: i32) { instr_660FD1(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD1_mem(addr: i32, r: i32) {
    instr_660FD1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD2(source: u64, r: i32) {
    // psrld mm, mm/m64
    psrld_r64(r, source);
}
pub unsafe fn instr_0FD2_reg(r1: i32, r2: i32) { instr_0FD2(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD2_mem(addr: i32, r: i32) {
    instr_0FD2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD2(source: reg128, r: i32) {
    // psrld xmm, xmm/m128
    // XXX: Aligned access or #gp
    psrld_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FD2_reg(r1: i32, r2: i32) { instr_660FD2(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD2_mem(addr: i32, r: i32) {
    instr_660FD2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD3(source: u64, r: i32) {
    // psrlq mm, mm/m64
    psrlq_r64(r, source);
}
pub unsafe fn instr_0FD3_reg(r1: i32, r2: i32) { instr_0FD3(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD3_mem(addr: i32, r: i32) {
    instr_0FD3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD3(source: reg128, r: i32) {
    // psrlq xmm, xmm/m128
    psrlq_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FD3_reg(r1: i32, r2: i32) { instr_660FD3(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD3_mem(addr: i32, r: i32) {
    instr_660FD3(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD4(source: u64, r: i32) {
    // paddq mm, mm/m64
    let destination = read_mmx64s(r);
    // paddq wraps on overflow; wrapping_add avoids a panic in debug builds
    write_mmx_reg64(r, source.wrapping_add(destination));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FD4_reg(r1: i32, r2: i32) { instr_0FD4(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD4_mem(addr: i32, r: i32) {
    instr_0FD4(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD4(source: reg128, r: i32) {
    // paddq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    result.u64[0] = destination.u64[0].wrapping_add(source.u64[0]);
    result.u64[1] = destination.u64[1].wrapping_add(source.u64[1]);
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FD4_reg(r1: i32, r2: i32) { instr_660FD4(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD4_mem(addr: i32, r: i32) {
    instr_660FD4(return_on_pagefault!(safe_read128s(addr)), r);
}
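// pmullw keeps only the low 16 bits of each 16x16 product; the high half is
// provided separately by pmulhw/pmulhuw further down, so a wrapping multiply
// is exactly the specified behaviour.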
#[no_mangle]
pub unsafe fn instr_0FD5(source: u64, r: i32) {
    // pmullw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = destination[i].wrapping_mul(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FD5_reg(r1: i32, r2: i32) { instr_0FD5(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD5_mem(addr: i32, r: i32) {
    instr_0FD5(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD5(source: reg128, r: i32) {
    // pmullw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = destination.u16[i].wrapping_mul(source.u16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FD5_reg(r1: i32, r2: i32) { instr_660FD5(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD5_mem(addr: i32, r: i32) {
    instr_660FD5(return_on_pagefault!(safe_read128s(addr)), r);
}
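// 0F D6 is only defined with a mandatory prefix: 66 0F D6 stores the low
// 64 bits of an XMM register (movq), F2 0F D6 moves XMM -> MMX (movdq2q) and
// F3 0F D6 moves MMX -> XMM with zero extension (movq2dq). The unprefixed
// form raises #UD.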
#[no_mangle]
pub unsafe fn instr_0FD6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FD6_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660FD6_mem(addr: i32, r: i32) {
    // movq xmm/m64, xmm
    movl_r128_m64(addr, r);
}
pub unsafe fn instr_660FD6_reg(r1: i32, r2: i32) {
    // movq xmm/m64, xmm
    write_xmm128_2(r1, read_xmm64s(r2), 0);
}
#[no_mangle]
pub unsafe fn instr_F20FD6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_F20FD6_reg(r1: i32, r2: i32) {
    // movdq2q mm, xmm
    write_mmx_reg64(r2, read_xmm128s(r1).u64[0]);
    transition_fpu_to_mmx();
}
#[no_mangle]
pub unsafe fn instr_F30FD6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_F30FD6_reg(r1: i32, r2: i32) {
    // movq2dq xmm, mm
    let source = read_mmx64s(r1);
    write_xmm_reg128(r2, reg128 { u64: [source, 0] });
    transition_fpu_to_mmx();
}
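// pmovmskb gathers the sign bit of every packed byte into the low bits of a
// general-purpose register, so a whole vector comparison result can be tested
// with a single scalar branch.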
pub unsafe fn instr_0FD7_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FD7(r1: i32) -> i32 {
    // pmovmskb r, mm
    let x: [u8; 8] = std::mem::transmute(read_mmx64s(r1));
    let mut result = 0;
    for i in 0..8 {
        // bit 7 of byte i becomes bit i of the mask
        result |= (x[i] as i32) >> 7 << i;
    }
    transition_fpu_to_mmx();
    result
}
pub unsafe fn instr_0FD7_reg(r1: i32, r2: i32) { write_reg32(r2, instr_0FD7(r1)); }
pub unsafe fn instr_660FD7_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FD7(r1: i32) -> i32 {
    // pmovmskb reg, xmm
    let x = read_xmm128s(r1);
    let mut result = 0;
    for i in 0..16 {
        result |= (x.u8[i] as i32) >> 7 << i;
    }
    result
}
pub unsafe fn instr_660FD7_reg(r1: i32, r2: i32) { write_reg32(r2, instr_660FD7(r1)); }
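// 0F D8/0F D9 are the unsigned saturating subtracts: differences below zero
// clamp to 0 instead of wrapping (psubusb on bytes, psubusw on words).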
#[no_mangle]
pub unsafe fn instr_0FD8(source: u64, r: i32) {
    // psubusb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = saturate_sd_to_ub(destination[i] as i32 - source[i] as i32) as u8;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FD8_reg(r1: i32, r2: i32) { instr_0FD8(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD8_mem(addr: i32, r: i32) {
    instr_0FD8(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD8(source: reg128, r: i32) {
    // psubusb xmm, xmm/m128
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = saturate_sd_to_ub(destination.u8[i] as i32 - source.u8[i] as i32) as u8;
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FD8_reg(r1: i32, r2: i32) { instr_660FD8(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD8_mem(addr: i32, r: i32) {
    instr_660FD8(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FD9(source: u64, r: i32) {
    // psubusw mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        // unsigned saturating subtract; plain u32 subtraction would underflow here
        result[i] = destination[i].saturating_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FD9_reg(r1: i32, r2: i32) { instr_0FD9(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FD9_mem(addr: i32, r: i32) {
    instr_0FD9(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FD9(source: reg128, r: i32) {
    // psubusw xmm, xmm/m128
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = destination.u16[i].saturating_sub(source.u16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FD9_reg(r1: i32, r2: i32) { instr_660FD9(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FD9_mem(addr: i32, r: i32) {
    instr_660FD9(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDA(source: u64, r: i32) {
    // pminub mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = u8::min(source[i], destination[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDA_reg(r1: i32, r2: i32) { instr_0FDA(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDA_mem(addr: i32, r: i32) {
    instr_0FDA(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDA(source: reg128, r: i32) {
    // pminub xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { u8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = u8::min(source.u8[i], destination.u8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FDA_reg(r1: i32, r2: i32) { instr_660FDA(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDA_mem(addr: i32, r: i32) {
    instr_660FDA(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDB(source: u64, r: i32) {
    // pand mm, mm/m64
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source & destination);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDB_reg(r1: i32, r2: i32) { instr_0FDB(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDB_mem(addr: i32, r: i32) {
    instr_0FDB(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDB(source: reg128, r: i32) {
    // pand xmm, xmm/m128
    // XXX: Aligned access or #gp
    pand_r128(source, r);
}
pub unsafe fn instr_660FDB_reg(r1: i32, r2: i32) { instr_660FDB(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDB_mem(addr: i32, r: i32) {
    instr_660FDB(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDC(source: u64, r: i32) {
    // paddusb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = saturate_ud_to_ub(destination[i] as u32 + source[i] as u32);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDC_reg(r1: i32, r2: i32) { instr_0FDC(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDC_mem(addr: i32, r: i32) {
    instr_0FDC(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDC(source: reg128, r: i32) {
    // paddusb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = saturate_ud_to_ub(source.u8[i] as u32 + destination.u8[i] as u32);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FDC_reg(r1: i32, r2: i32) { instr_660FDC(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDC_mem(addr: i32, r: i32) {
    instr_660FDC(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDD(source: u64, r: i32) {
    // paddusw mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = saturate_uw(destination[i] as u32 + source[i] as u32);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDD_reg(r1: i32, r2: i32) { instr_0FDD(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDD_mem(addr: i32, r: i32) {
    instr_0FDD(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDD(source: reg128, r: i32) {
    // paddusw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = saturate_uw(source.u16[i] as u32 + destination.u16[i] as u32);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FDD_reg(r1: i32, r2: i32) { instr_660FDD(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDD_mem(addr: i32, r: i32) {
    instr_660FDD(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDE(source: u64, r: i32) {
    // pmaxub mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = u8::max(source[i], destination[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDE_reg(r1: i32, r2: i32) { instr_0FDE(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDE_mem(addr: i32, r: i32) {
    instr_0FDE(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDE(source: reg128, r: i32) {
    // pmaxub xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = u8::max(source.u8[i], destination.u8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FDE_reg(r1: i32, r2: i32) { instr_660FDE(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDE_mem(addr: i32, r: i32) {
    instr_660FDE(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FDF(source: u64, r: i32) {
    // pandn mm, mm/m64: destination = !destination & source
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source & !destination);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FDF_reg(r1: i32, r2: i32) { instr_0FDF(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FDF_mem(addr: i32, r: i32) {
    instr_0FDF(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FDF(source: reg128, r: i32) {
    // pandn xmm, xmm/m128
    // XXX: Aligned access or #gp
    pandn_r128(source, r);
}
pub unsafe fn instr_660FDF_reg(r1: i32, r2: i32) { instr_660FDF(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FDF_mem(addr: i32, r: i32) {
    instr_660FDF(return_on_pagefault!(safe_read128s(addr)), r);
}
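// pavgb/pavgw compute the rounded average (a + b + 1) >> 1 in a wider type;
// the +1 implements round-half-up without overflowing the element width.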
#[no_mangle]
pub unsafe fn instr_0FE0(source: u64, r: i32) {
    // pavgb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = ((destination[i] as i32 + source[i] as i32 + 1) >> 1) as u8;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE0_reg(r1: i32, r2: i32) { instr_0FE0(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE0_mem(addr: i32, r: i32) {
    instr_0FE0(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE0(source: reg128, r: i32) {
    // pavgb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = ((destination.u8[i] as i32 + source.u8[i] as i32 + 1) >> 1) as u8;
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE0_reg(r1: i32, r2: i32) { instr_660FE0(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE0_mem(addr: i32, r: i32) {
    instr_660FE0(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE1(source: u64, r: i32) {
    // psraw mm, mm/m64
    psraw_r64(r, source);
}
pub unsafe fn instr_0FE1_reg(r1: i32, r2: i32) { instr_0FE1(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE1_mem(addr: i32, r: i32) {
    instr_0FE1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE1(source: reg128, r: i32) {
    // psraw xmm, xmm/m128
    // XXX: Aligned access or #gp
    psraw_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FE1_reg(r1: i32, r2: i32) { instr_660FE1(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE1_mem(addr: i32, r: i32) {
    instr_660FE1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE2(source: u64, r: i32) {
    // psrad mm, mm/m64
    psrad_r64(r, source);
}
pub unsafe fn instr_0FE2_reg(r1: i32, r2: i32) { instr_0FE2(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE2_mem(addr: i32, r: i32) {
    instr_0FE2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE2(source: reg128, r: i32) {
    // psrad xmm, xmm/m128
    // XXX: Aligned access or #gp
    psrad_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FE2_reg(r1: i32, r2: i32) { instr_660FE2(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE2_mem(addr: i32, r: i32) {
    instr_660FE2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE3(source: u64, r: i32) {
    // pavgw mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = ((destination[i] as i32 + source[i] as i32 + 1) >> 1) as u16;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE3_reg(r1: i32, r2: i32) { instr_0FE3(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE3_mem(addr: i32, r: i32) {
    instr_0FE3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE3(source: reg128, r: i32) {
    // pavgw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let mut destination = read_xmm128s(r);
    for i in 0..8 {
        destination.u16[i] = ((destination.u16[i] as i32 + source.u16[i] as i32 + 1) >> 1) as u16;
    }
    write_xmm_reg128(r, destination);
}
pub unsafe fn instr_660FE3_reg(r1: i32, r2: i32) { instr_660FE3(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE3_mem(addr: i32, r: i32) {
    instr_660FE3(return_on_pagefault!(safe_read128s(addr)), r);
}
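// 0F E4/0F E5 return the high 16 bits of the 32-bit product: pmulhuw treats
// the operands as unsigned, pmulhw as signed. Combined with pmullw this
// yields the full 16x16 -> 32 multiply.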
#[no_mangle]
pub unsafe fn instr_0FE4(source: u64, r: i32) {
    // pmulhuw mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        // the unsigned product can exceed i32::MAX, so compute it in u32
        result[i] = ((destination[i] as u32 * source[i] as u32) >> 16) as u16;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE4_reg(r1: i32, r2: i32) { instr_0FE4(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE4_mem(addr: i32, r: i32) {
    instr_0FE4(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE4(source: reg128, r: i32) {
    // pmulhuw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = ((source.u16[i] as u32 * destination.u16[i] as u32) >> 16) as u16;
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE4_reg(r1: i32, r2: i32) { instr_660FE4(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE4_mem(addr: i32, r: i32) {
    instr_660FE4(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE5(source: u64, r: i32) {
    // pmulhw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = ((destination[i] as i32 * source[i] as i32) >> 16) as i16;
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE5_reg(r1: i32, r2: i32) { instr_0FE5(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE5_mem(addr: i32, r: i32) {
    instr_0FE5(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE5(source: reg128, r: i32) {
    // pmulhw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = ((destination.i16[i] as i32 * source.i16[i] as i32) >> 16) as u16;
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE5_reg(r1: i32, r2: i32) { instr_660FE5(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE5_mem(addr: i32, r: i32) {
    instr_660FE5(return_on_pagefault!(safe_read128s(addr)), r);
}
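// The three 0F E6 variants convert between packed f64 and i32: 66 0F E6
// (cvttpd2dq) truncates toward zero, F2 0F E6 (cvtpd2dq) rounds according to
// the current rounding mode, and F3 0F E6 (cvtdq2pd) widens two i32s to f64,
// which is always exact.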
#[no_mangle]
pub unsafe fn instr_0FE6_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_0FE6_reg(_r1: i32, _r2: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn instr_660FE6(source: reg128, r: i32) {
    // cvttpd2dq xmm1, xmm2/m128
    let result = reg128 {
        i32: [
            sse_convert_with_truncation_f64_to_i32(source.f64[0]),
            sse_convert_with_truncation_f64_to_i32(source.f64[1]),
            0,
            0,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE6_mem(addr: i32, r: i32) {
    instr_660FE6(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_660FE6_reg(r1: i32, r2: i32) { instr_660FE6(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F20FE6(source: reg128, r: i32) {
    // cvtpd2dq xmm1, xmm2/m128
    let result = reg128 {
        i32: [
            // XXX: Precision exception
            sse_convert_f64_to_i32(source.f64[0]),
            sse_convert_f64_to_i32(source.f64[1]),
            0,
            0,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_F20FE6_mem(addr: i32, r: i32) {
    instr_F20FE6(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_F20FE6_reg(r1: i32, r2: i32) { instr_F20FE6(read_xmm128s(r1), r2); }
#[no_mangle]
pub unsafe fn instr_F30FE6(source: u64, r: i32) {
    // cvtdq2pd xmm1, xmm2/m64
    let result = reg128 {
        f64: [
            // Note: Conversion never fails (i32 fits into f64)
            source as i32 as f64,
            (source >> 32) as i32 as f64,
        ],
    };
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_F30FE6_mem(addr: i32, r: i32) {
    instr_F30FE6(return_on_pagefault!(safe_read64s(addr)), r);
}
pub unsafe fn instr_F30FE6_reg(r1: i32, r2: i32) { instr_F30FE6(read_xmm64s(r1), r2); }
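// movntq/movntdq are non-temporal store hints that bypass the cache on real
// hardware; here they behave as ordinary 64/128-bit stores. The encoding only
// allows a memory destination, so the register forms raise #UD.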
#[no_mangle]
pub unsafe fn instr_0FE7_mem(addr: i32, r: i32) {
    // movntq m64, mm
    mov_r_m64(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0FE7_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660FE7_reg(_r1: i32, _r2: i32) { trigger_ud(); }
pub unsafe fn instr_660FE7_mem(addr: i32, r: i32) {
    // movntdq m128, xmm
    mov_r_m128(addr, r);
}
#[no_mangle]
pub unsafe fn instr_0FE8(source: u64, r: i32) {
    // psubsb mm, mm/m64
    let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [i8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        // signed saturating subtract: clamps to the i8 range
        result[i] = destination[i].saturating_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE8_reg(r1: i32, r2: i32) { instr_0FE8(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE8_mem(addr: i32, r: i32) {
    instr_0FE8(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE8(source: reg128, r: i32) {
    // psubsb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.i8[i] = destination.i8[i].saturating_sub(source.i8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE8_reg(r1: i32, r2: i32) { instr_660FE8(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE8_mem(addr: i32, r: i32) {
    instr_660FE8(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FE9(source: u64, r: i32) {
    // psubsw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = destination[i].saturating_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FE9_reg(r1: i32, r2: i32) { instr_0FE9(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FE9_mem(addr: i32, r: i32) {
    instr_0FE9(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FE9(source: reg128, r: i32) {
    // psubsw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.i16[i] = destination.i16[i].saturating_sub(source.i16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FE9_reg(r1: i32, r2: i32) { instr_660FE9(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FE9_mem(addr: i32, r: i32) {
    instr_660FE9(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FEA(source: u64, r: i32) {
    // pminsw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = i16::min(destination[i], source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FEA_reg(r1: i32, r2: i32) { instr_0FEA(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FEA_mem(addr: i32, r: i32) {
    instr_0FEA(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEA(source: reg128, r: i32) {
    // pminsw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.i16[i] = i16::min(destination.i16[i], source.i16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FEA_reg(r1: i32, r2: i32) { instr_660FEA(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FEA_mem(addr: i32, r: i32) {
    instr_660FEA(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FEB(source: u64, r: i32) {
    // por mm, mm/m64
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source | destination);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FEB_reg(r1: i32, r2: i32) { instr_0FEB(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FEB_mem(addr: i32, r: i32) {
    instr_0FEB(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEB(source: reg128, r: i32) {
    // por xmm, xmm/m128
    // XXX: Aligned access or #gp
    por_r128(source, r);
}
pub unsafe fn instr_660FEB_reg(r1: i32, r2: i32) { instr_660FEB(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FEB_mem(addr: i32, r: i32) {
    instr_660FEB(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FEC(source: u64, r: i32) {
    // paddsb mm, mm/m64
    let destination: [i8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [i8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        // signed saturating add: clamps to the i8 range
        result[i] = destination[i].saturating_add(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FEC_reg(r1: i32, r2: i32) { instr_0FEC(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FEC_mem(addr: i32, r: i32) {
    instr_0FEC(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEC(source: reg128, r: i32) {
    // paddsb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.i8[i] = destination.i8[i].saturating_add(source.i8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FEC_reg(r1: i32, r2: i32) { instr_660FEC(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FEC_mem(addr: i32, r: i32) {
    instr_660FEC(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FED(source: u64, r: i32) {
    // paddsw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = destination[i].saturating_add(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FED_reg(r1: i32, r2: i32) { instr_0FED(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FED_mem(addr: i32, r: i32) {
    instr_0FED(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FED(source: reg128, r: i32) {
    // paddsw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.i16[i] = destination.i16[i].saturating_add(source.i16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FED_reg(r1: i32, r2: i32) { instr_660FED(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FED_mem(addr: i32, r: i32) {
    instr_660FED(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FEE(source: u64, r: i32) {
    // pmaxsw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = i16::max(destination[i], source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FEE_reg(r1: i32, r2: i32) { instr_0FEE(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FEE_mem(addr: i32, r: i32) {
    instr_0FEE(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEE(source: reg128, r: i32) {
    // pmaxsw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.i16[i] = i16::max(destination.i16[i], source.i16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FEE_reg(r1: i32, r2: i32) { instr_660FEE(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FEE_mem(addr: i32, r: i32) {
    instr_660FEE(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FEF(source: u64, r: i32) {
    // pxor mm, mm/m64
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, source ^ destination);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FEF_reg(r1: i32, r2: i32) { instr_0FEF(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FEF_mem(addr: i32, r: i32) {
    instr_0FEF(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FEF(source: reg128, r: i32) {
    // pxor xmm, xmm/m128
    // XXX: Aligned access or #gp
    pxor_r128(source, r);
}
pub unsafe fn instr_660FEF_reg(r1: i32, r2: i32) { instr_660FEF(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FEF_mem(addr: i32, r: i32) {
    instr_660FEF(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF0() { unimplemented_sse(); }
#[no_mangle]
pub unsafe fn instr_0FF1(source: u64, r: i32) {
    // psllw mm, mm/m64
    psllw_r64(r, source);
}
pub unsafe fn instr_0FF1_reg(r1: i32, r2: i32) { instr_0FF1(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF1_mem(addr: i32, r: i32) {
    instr_0FF1(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF1(source: reg128, r: i32) {
    // psllw xmm, xmm/m128
    // XXX: Aligned access or #gp
    psllw_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FF1_reg(r1: i32, r2: i32) { instr_660FF1(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF1_mem(addr: i32, r: i32) {
    instr_660FF1(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF2(source: u64, r: i32) {
    // pslld mm, mm/m64
    pslld_r64(r, source);
}
pub unsafe fn instr_0FF2_reg(r1: i32, r2: i32) { instr_0FF2(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF2_mem(addr: i32, r: i32) {
    instr_0FF2(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF2(source: reg128, r: i32) {
    // pslld xmm, xmm/m128
    // XXX: Aligned access or #gp
    pslld_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FF2_reg(r1: i32, r2: i32) { instr_660FF2(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF2_mem(addr: i32, r: i32) {
    instr_660FF2(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF3(source: u64, r: i32) {
    // psllq mm, mm/m64
    psllq_r64(r, source);
}
pub unsafe fn instr_0FF3_reg(r1: i32, r2: i32) { instr_0FF3(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF3_mem(addr: i32, r: i32) {
    instr_0FF3(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF3(source: reg128, r: i32) {
    // psllq xmm, xmm/m128
    // XXX: Aligned access or #gp
    psllq_r128(r, source.u64[0]);
}
pub unsafe fn instr_660FF3_reg(r1: i32, r2: i32) { instr_660FF3(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF3_mem(addr: i32, r: i32) {
    instr_660FF3(return_on_pagefault!(safe_read128s(addr)), r);
}
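// pmuludq multiplies the low unsigned 32 bits of each 64-bit lane into a full
// 64-bit product (lanes 0 and 2 in the XMM form); a u32 x u32 product always
// fits in a u64, so no wrapping is needed here.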
#[no_mangle]
pub unsafe fn instr_0FF4(source: u64, r: i32) {
    // pmuludq mm, mm/m64
    let destination = read_mmx64s(r);
    write_mmx_reg64(r, (source as u32 as u64) * (destination as u32 as u64));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF4_reg(r1: i32, r2: i32) { instr_0FF4(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF4_mem(addr: i32, r: i32) {
    instr_0FF4(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF4(source: reg128, r: i32) {
    // pmuludq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    result.u64[0] = source.u32[0] as u64 * destination.u32[0] as u64;
    result.u64[1] = source.u32[2] as u64 * destination.u32[2] as u64;
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FF4_reg(r1: i32, r2: i32) { instr_660FF4(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF4_mem(addr: i32, r: i32) {
    instr_660FF4(return_on_pagefault!(safe_read128s(addr)), r);
}
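// pmaddwd multiplies corresponding signed words and adds adjacent pairs of
// the 32-bit products. The pair sum overflows i32 only for
// 0x8000*0x8000 + 0x8000*0x8000, which must wrap to 0x80000000 as on real
// hardware, hence the wrapping adds below.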
#[no_mangle]
pub unsafe fn instr_0FF5(source: u64, r: i32) {
    // pmaddwd mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mul0 = destination[0] as i32 * source[0] as i32;
    let mul1 = destination[1] as i32 * source[1] as i32;
    let mul2 = destination[2] as i32 * source[2] as i32;
    let mul3 = destination[3] as i32 * source[3] as i32;
    let low = mul0.wrapping_add(mul1);
    let high = mul2.wrapping_add(mul3);
    write_mmx_reg64(r, low as u32 as u64 | (high as u64) << 32);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF5_reg(r1: i32, r2: i32) { instr_0FF5(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF5_mem(addr: i32, r: i32) {
    instr_0FF5(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF5(source: reg128, r: i32) {
    // pmaddwd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..4 {
        result.i32[i] = (destination.i16[2 * i] as i32 * source.i16[2 * i] as i32)
            .wrapping_add(destination.i16[2 * i + 1] as i32 * source.i16[2 * i + 1] as i32);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FF5_reg(r1: i32, r2: i32) { instr_660FF5(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF5_mem(addr: i32, r: i32) {
    instr_660FF5(return_on_pagefault!(safe_read128s(addr)), r);
}
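// psadbw sums the absolute differences of unsigned bytes; each 8-byte group
// contributes at most 8 * 255 = 2040, so the accumulators cannot overflow.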
#[no_mangle]
pub unsafe fn instr_0FF6(source: u64, r: i32) {
    // psadbw mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut sum = 0;
    for i in 0..8 {
        sum += (destination[i] as i32 - source[i] as i32).abs() as u64;
    }
    write_mmx_reg64(r, sum);
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF6_reg(r1: i32, r2: i32) { instr_0FF6(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF6_mem(addr: i32, r: i32) {
    instr_0FF6(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF6(source: reg128, r: i32) {
    // psadbw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut sum0 = 0;
    let mut sum1 = 0;
    for i in 0..8 {
        sum0 += (destination.u8[i + 0] as i32 - source.u8[i + 0] as i32).abs() as u32;
        sum1 += (destination.u8[i + 8] as i32 - source.u8[i + 8] as i32).abs() as u32;
    }
    write_xmm128(r, sum0 as i32, 0, sum1 as i32, 0);
}
pub unsafe fn instr_660FF6_reg(r1: i32, r2: i32) { instr_660FF6(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF6_mem(addr: i32, r: i32) {
    instr_660FF6(return_on_pagefault!(safe_read128s(addr)), r);
}
pub unsafe fn instr_0FF7_mem(_addr: i32, _r: i32) { trigger_ud(); }
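// maskmovq/maskmovdqu store only the bytes whose mask byte has bit 7 set, to
// the address in (DS:)EDI. Writability of the whole destination is checked up
// front so a page fault cannot leave a partial store behind; the per-byte
// writes below can therefore safely unwrap.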
#[no_mangle]
pub unsafe fn maskmovq(r1: i32, r2: i32, addr: i32) {
    // maskmovq mm, mm
    let source: [u8; 8] = std::mem::transmute(read_mmx64s(r2));
    let mask: [u8; 8] = std::mem::transmute(read_mmx64s(r1));
    match writable_or_pagefault(addr, 8) {
        Ok(()) => *page_fault = false,
        Err(()) => {
            *page_fault = true;
            return;
        },
    }
    for i in 0..8 {
        if 0 != mask[i] & 0x80 {
            safe_write8(addr + i as i32, source[i] as i32).unwrap();
        }
    }
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF7_reg(r1: i32, r2: i32) {
    maskmovq(
        r1,
        r2,
        return_on_pagefault!(get_seg_prefix_ds(get_reg_asize(EDI))),
    )
}
pub unsafe fn instr_660FF7_mem(_addr: i32, _r: i32) { trigger_ud(); }
#[no_mangle]
pub unsafe fn maskmovdqu(r1: i32, r2: i32, addr: i32) {
    // maskmovdqu xmm, xmm
    let source = read_xmm128s(r2);
    let mask = read_xmm128s(r1);
    match writable_or_pagefault(addr, 16) {
        Ok(()) => *page_fault = false,
        Err(()) => {
            *page_fault = true;
            return;
        },
    }
    for i in 0..16 {
        if 0 != mask.u8[i] & 0x80 {
            safe_write8(addr + i as i32, source.u8[i] as i32).unwrap();
        }
    }
}
pub unsafe fn instr_660FF7_reg(r1: i32, r2: i32) {
    maskmovdqu(
        r1,
        r2,
        return_on_pagefault!(get_seg_prefix_ds(get_reg_asize(EDI))),
    )
}
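// 0F F8..0F FE (and the 66-prefixed XMM forms) are the modular packed
// subtracts and adds: x86 defines them modulo 2^n per element, which the
// wrapping_* operations express without tripping debug overflow checks.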
#[no_mangle]
pub unsafe fn instr_0FF8(source: u64, r: i32) {
    // psubb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = destination[i].wrapping_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF8_reg(r1: i32, r2: i32) { instr_0FF8(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF8_mem(addr: i32, r: i32) {
    instr_0FF8(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF8(source: reg128, r: i32) {
    // psubb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = destination.u8[i].wrapping_sub(source.u8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FF8_reg(r1: i32, r2: i32) { instr_660FF8(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF8_mem(addr: i32, r: i32) {
    instr_660FF8(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FF9(source: u64, r: i32) {
    // psubw mm, mm/m64
    let destination: [i16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [i16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = destination[i].wrapping_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FF9_reg(r1: i32, r2: i32) { instr_0FF9(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FF9_mem(addr: i32, r: i32) {
    instr_0FF9(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FF9(source: reg128, r: i32) {
    // psubw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.i16[i] = destination.i16[i].wrapping_sub(source.i16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FF9_reg(r1: i32, r2: i32) { instr_660FF9(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FF9_mem(addr: i32, r: i32) {
    instr_660FF9(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFA(source: u64, r: i32) {
    // psubd mm, mm/m64
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = destination[i].wrapping_sub(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FFA_reg(r1: i32, r2: i32) { instr_0FFA(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FFA_mem(addr: i32, r: i32) {
    instr_0FFA(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FFA(source: reg128, r: i32) {
    // psubd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    write_xmm128(
        r,
        destination.i32[0].wrapping_sub(source.i32[0]),
        destination.i32[1].wrapping_sub(source.i32[1]),
        destination.i32[2].wrapping_sub(source.i32[2]),
        destination.i32[3].wrapping_sub(source.i32[3]),
    );
}
pub unsafe fn instr_660FFA_reg(r1: i32, r2: i32) { instr_660FFA(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FFA_mem(addr: i32, r: i32) {
    instr_660FFA(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFB(source: u64, r: i32) {
    // psubq mm, mm/m64
    write_mmx_reg64(r, read_mmx64s(r).wrapping_sub(source));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FFB_reg(r1: i32, r2: i32) { instr_0FFB(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FFB_mem(addr: i32, r: i32) {
    instr_0FFB(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FFB(source: reg128, r: i32) {
    // psubq xmm, xmm/m128
    // XXX: Aligned access or #gp
    let mut destination = read_xmm128s(r);
    destination.u64[0] = destination.u64[0].wrapping_sub(source.u64[0]);
    destination.u64[1] = destination.u64[1].wrapping_sub(source.u64[1]);
    write_xmm_reg128(r, destination);
}
pub unsafe fn instr_660FFB_reg(r1: i32, r2: i32) { instr_660FFB(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FFB_mem(addr: i32, r: i32) {
    instr_660FFB(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFC(source: u64, r: i32) {
    // paddb mm, mm/m64
    let destination: [u8; 8] = std::mem::transmute(read_mmx64s(r));
    let source: [u8; 8] = std::mem::transmute(source);
    let mut result = [0; 8];
    for i in 0..8 {
        result[i] = destination[i].wrapping_add(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FFC_reg(r1: i32, r2: i32) { instr_0FFC(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FFC_mem(addr: i32, r: i32) {
    instr_0FFC(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FFC(source: reg128, r: i32) {
    // paddb xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..16 {
        result.u8[i] = destination.u8[i].wrapping_add(source.u8[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FFC_reg(r1: i32, r2: i32) { instr_660FFC(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FFC_mem(addr: i32, r: i32) {
    instr_660FFC(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFD(source: u64, r: i32) {
    // paddw mm, mm/m64
    let destination: [u16; 4] = std::mem::transmute(read_mmx64s(r));
    let source: [u16; 4] = std::mem::transmute(source);
    let mut result = [0; 4];
    for i in 0..4 {
        result[i] = destination[i].wrapping_add(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FFD_reg(r1: i32, r2: i32) { instr_0FFD(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FFD_mem(addr: i32, r: i32) {
    instr_0FFD(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FFD(source: reg128, r: i32) {
    // paddw xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let mut result = reg128 { i8: [0; 16] };
    for i in 0..8 {
        result.u16[i] = destination.u16[i].wrapping_add(source.u16[i]);
    }
    write_xmm_reg128(r, result);
}
pub unsafe fn instr_660FFD_reg(r1: i32, r2: i32) { instr_660FFD(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FFD_mem(addr: i32, r: i32) {
    instr_660FFD(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFE(source: u64, r: i32) {
    // paddd mm, mm/m64
    let destination: [i32; 2] = std::mem::transmute(read_mmx64s(r));
    let source: [i32; 2] = std::mem::transmute(source);
    let mut result = [0; 2];
    for i in 0..2 {
        result[i] = destination[i].wrapping_add(source[i]);
    }
    write_mmx_reg64(r, std::mem::transmute(result));
    transition_fpu_to_mmx();
}
pub unsafe fn instr_0FFE_reg(r1: i32, r2: i32) { instr_0FFE(read_mmx64s(r1), r2); }
pub unsafe fn instr_0FFE_mem(addr: i32, r: i32) {
    instr_0FFE(return_on_pagefault!(safe_read64s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_660FFE(source: reg128, r: i32) {
    // paddd xmm, xmm/m128
    // XXX: Aligned access or #gp
    let destination = read_xmm128s(r);
    let dword0 = destination.i32[0].wrapping_add(source.i32[0]);
    let dword1 = destination.i32[1].wrapping_add(source.i32[1]);
    let dword2 = destination.i32[2].wrapping_add(source.i32[2]);
    let dword3 = destination.i32[3].wrapping_add(source.i32[3]);
    write_xmm128(r, dword0, dword1, dword2, dword3);
}
pub unsafe fn instr_660FFE_reg(r1: i32, r2: i32) { instr_660FFE(read_xmm128s(r1), r2); }
pub unsafe fn instr_660FFE_mem(addr: i32, r: i32) {
    instr_660FFE(return_on_pagefault!(safe_read128s(addr)), r);
}
#[no_mangle]
pub unsafe fn instr_0FFF() {
    // 0F FF is an invalid opcode that Windows 98 is known to execute;
    // log it and raise #UD as real hardware would.
    dbg_log!("#ud: 0F FF");
    trigger_ud();
}