// cpu.rs

#![allow(non_upper_case_globals)]
extern "C" {
    #[no_mangle]
    fn cpu_exception_hook(interrupt: i32) -> bool;
    #[no_mangle]
    fn do_task_switch(selector: i32, has_error_code: bool, error_code: i32);
    //#[no_mangle]
    //fn logop(addr: i32, op: i32);
    #[no_mangle]
    fn microtick() -> f64;
    #[no_mangle]
    fn call_indirect1(f: i32, x: u16);
    #[no_mangle]
    fn pic_acknowledge();
    #[no_mangle]
    pub fn io_port_read8(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_read16(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_read32(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_write8(port: i32, value: i32);
    #[no_mangle]
    pub fn io_port_write16(port: i32, value: i32);
    #[no_mangle]
    pub fn io_port_write32(port: i32, value: i32);
}
use cpu::fpu::fpu_set_tag_word;
use cpu::global_pointers::*;
pub use cpu::imports::mem8;
use cpu::memory;
use cpu::memory::{
    in_mapped_range, read8, read16, read32s, read64s, read128, read_aligned32, write8,
    write_aligned32,
};
use cpu::misc_instr::{
    adjust_stack_reg, get_stack_pointer, getaf, getcf, getof, getpf, getsf, getzf, pop16, pop32s,
    push16, push32,
};
use cpu::modrm::{resolve_modrm16, resolve_modrm32};
use page::Page;
use paging::OrPageFault;
use profiler;
use profiler::stat::*;
use state_flags::CachedStateFlags;
pub use util::dbg_trace;
/// The offset for our generated functions in the wasm table. Every index less than this is
/// reserved for rustc's indirect functions
pub const WASM_TABLE_OFFSET: u32 = 1024;
#[derive(Copy, Clone)]
#[repr(C)]
pub union reg128 {
    pub i8_0: [i8; 16],
    pub i16_0: [i16; 8],
    pub i32_0: [i32; 4],
    pub i64_0: [i64; 2],
    pub u8_0: [u8; 16],
    pub u16_0: [u16; 8],
    pub u32_0: [u32; 4],
    pub u64_0: [u64; 2],
    pub f32_0: [f32; 4],
    pub f64_0: [f64; 2],
}
/// Setting this to true will make execution extremely slow
pub const CHECK_MISSED_ENTRY_POINTS: bool = false;
pub const INTERPRETER_ITERATION_LIMIT: u32 = 1000;
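// EFLAGS bit masks. FLAG_SUB is not an architectural flag bit; it is an internal marker used
// by the emulator's flag handling.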
pub const FLAG_SUB: i32 = -0x8000_0000;
pub const FLAG_CARRY: i32 = 1;
pub const FLAG_PARITY: i32 = 4;
pub const FLAG_ADJUST: i32 = 16;
pub const FLAG_ZERO: i32 = 64;
pub const FLAG_SIGN: i32 = 128;
pub const FLAG_TRAP: i32 = 256;
pub const FLAG_INTERRUPT: i32 = 512;
pub const FLAG_DIRECTION: i32 = 1024;
pub const FLAG_OVERFLOW: i32 = 2048;
pub const FLAG_IOPL: i32 = 1 << 12 | 1 << 13;
pub const FLAG_NT: i32 = 1 << 14;
pub const FLAG_RF: i32 = 1 << 16;
pub const FLAG_VM: i32 = 1 << 17;
pub const FLAG_AC: i32 = 1 << 18;
pub const FLAG_VIF: i32 = 1 << 19;
pub const FLAG_VIP: i32 = 1 << 20;
pub const FLAG_ID: i32 = 1 << 21;
pub const FLAGS_DEFAULT: i32 = 1 << 1;
pub const FLAGS_MASK: i32 = FLAG_CARRY
    | FLAG_PARITY
    | FLAG_ADJUST
    | FLAG_ZERO
    | FLAG_SIGN
    | FLAG_TRAP
    | FLAG_INTERRUPT
    | FLAG_DIRECTION
    | FLAG_OVERFLOW
    | FLAG_IOPL
    | FLAG_NT
    | FLAG_RF
    | FLAG_VM
    | FLAG_AC
    | FLAG_VIF
    | FLAG_VIP
    | FLAG_ID;
pub const FLAGS_ALL: i32 =
    FLAG_CARRY | FLAG_PARITY | FLAG_ADJUST | FLAG_ZERO | FLAG_SIGN | FLAG_OVERFLOW;
pub const OPSIZE_8: i32 = 7;
pub const OPSIZE_16: i32 = 15;
pub const OPSIZE_32: i32 = 31;
pub const EAX: i32 = 0;
pub const ECX: i32 = 1;
pub const EDX: i32 = 2;
pub const EBX: i32 = 3;
pub const ESP: i32 = 4;
pub const EBP: i32 = 5;
pub const ESI: i32 = 6;
pub const EDI: i32 = 7;
pub const AX: i32 = 0;
pub const CX: i32 = 1;
pub const DX: i32 = 2;
pub const BX: i32 = 3;
pub const SP: i32 = 4;
pub const BP: i32 = 5;
pub const SI: i32 = 6;
pub const DI: i32 = 7;
pub const AL: i32 = 0;
pub const CL: i32 = 1;
pub const DL: i32 = 2;
pub const BL: i32 = 3;
pub const AH: i32 = 4;
pub const CH: i32 = 5;
pub const DH: i32 = 6;
pub const BH: i32 = 7;
pub const ES: i32 = 0;
pub const CS: i32 = 1;
pub const SS: i32 = 2;
pub const DS: i32 = 3;
pub const FS: i32 = 4;
pub const GS: i32 = 5;
pub const TR: i32 = 6;
pub const LDTR: i32 = 7;
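// Page-table (PDE/PTE) flag bits.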
pub const PAGE_TABLE_PRESENT_MASK: i32 = 1 << 0;
pub const PAGE_TABLE_RW_MASK: i32 = 1 << 1;
pub const PAGE_TABLE_USER_MASK: i32 = 1 << 2;
pub const PAGE_TABLE_ACCESSED_MASK: i32 = 1 << 5;
pub const PAGE_TABLE_DIRTY_MASK: i32 = 1 << 6;
pub const PAGE_TABLE_PSE_MASK: i32 = 1 << 7;
pub const PAGE_TABLE_GLOBAL_MASK: i32 = 1 << 8;
pub const MMAP_BLOCK_BITS: i32 = 17;
pub const MMAP_BLOCK_SIZE: i32 = 1 << MMAP_BLOCK_BITS;
pub const CR0_PE: i32 = 1;
pub const CR0_MP: i32 = 1 << 1;
pub const CR0_EM: i32 = 1 << 2;
pub const CR0_TS: i32 = 1 << 3;
pub const CR0_ET: i32 = 1 << 4;
pub const CR0_WP: i32 = 1 << 16;
pub const CR0_AM: i32 = 1 << 18;
pub const CR0_NW: i32 = 1 << 29;
pub const CR0_CD: i32 = 1 << 30;
pub const CR0_PG: i32 = 1 << 31;
pub const CR4_VME: i32 = 1;
pub const CR4_PVI: i32 = 1 << 1;
pub const CR4_TSD: i32 = 1 << 2;
pub const CR4_PSE: i32 = 1 << 4;
pub const CR4_DE: i32 = 1 << 3;
pub const CR4_PAE: i32 = 1 << 5;
pub const CR4_PGE: i32 = 1 << 7;
pub const CR4_OSFXSR: i32 = 1 << 9;
pub const CR4_OSXMMEXCPT: i32 = 1 << 10;
pub const IA32_SYSENTER_CS: i32 = 372;
pub const IA32_SYSENTER_ESP: i32 = 373;
pub const IA32_SYSENTER_EIP: i32 = 374;
pub const IA32_TIME_STAMP_COUNTER: i32 = 16;
pub const IA32_PLATFORM_ID: i32 = 23;
pub const IA32_APIC_BASE_MSR: i32 = 27;
pub const IA32_BIOS_SIGN_ID: i32 = 139;
pub const MSR_PLATFORM_INFO: i32 = 206;
pub const MSR_MISC_FEATURE_ENABLES: i32 = 320;
pub const IA32_MISC_ENABLE: i32 = 416;
pub const IA32_RTIT_CTL: i32 = 1392;
pub const MSR_SMI_COUNT: i32 = 52;
pub const MSR_TEST_CTRL: i32 = 0x33;
pub const MSR_IA32_FEAT_CTL: i32 = 0x3a;
pub const IA32_MCG_CAP: i32 = 377;
pub const IA32_KERNEL_GS_BASE: i32 = 0xC0000101u32 as i32;
pub const MSR_PKG_C2_RESIDENCY: i32 = 1549;
pub const IA32_APIC_BASE_BSP: i32 = 1 << 8;
pub const IA32_APIC_BASE_EXTD: i32 = 1 << 10;
pub const IA32_APIC_BASE_EN: i32 = 1 << 11;
pub const APIC_ADDRESS: i32 = 0xFEE00000u32 as i32;
pub const SEG_PREFIX_NONE: i32 = -1;
pub const SEG_PREFIX_ZERO: i32 = 7;
pub const PREFIX_MASK_REP: i32 = 24;
pub const PREFIX_REPZ: i32 = 8;
pub const PREFIX_REPNZ: i32 = 16;
pub const PREFIX_MASK_SEGMENT: i32 = 7;
pub const PREFIX_MASK_OPSIZE: i32 = 32;
pub const PREFIX_MASK_ADDRSIZE: i32 = 64;
pub const PREFIX_F2: i32 = PREFIX_REPNZ;
pub const PREFIX_F3: i32 = PREFIX_REPZ;
pub const PREFIX_66: i32 = PREFIX_MASK_OPSIZE;
pub const LOG_CPU: i32 = 2;
pub const MXCSR_MASK: i32 = 0xffff;
pub const MXCSR_FZ: i32 = 1 << 15;
pub const MXCSR_DAZ: i32 = 1 << 6;
pub const MXCSR_RC_SHIFT: i32 = 13;
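// Flags stored in entries of the emulator's software TLB.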
pub const VALID_TLB_ENTRY_MAX: i32 = 10000;
pub const TLB_VALID: i32 = 1 << 0;
pub const TLB_READONLY: i32 = 1 << 1;
pub const TLB_NO_USER: i32 = 1 << 2;
pub const TLB_IN_MAPPED_RANGE: i32 = 1 << 3;
pub const TLB_GLOBAL: i32 = 1 << 4;
pub const TLB_HAS_CODE: i32 = 1 << 5;
pub const IVT_SIZE: u32 = 0x400;
pub const CPU_EXCEPTION_DE: i32 = 0;
pub const CPU_EXCEPTION_DB: i32 = 1;
pub const CPU_EXCEPTION_NMI: i32 = 2;
pub const CPU_EXCEPTION_BP: i32 = 3;
pub const CPU_EXCEPTION_OF: i32 = 4;
pub const CPU_EXCEPTION_BR: i32 = 5;
pub const CPU_EXCEPTION_UD: i32 = 6;
pub const CPU_EXCEPTION_NM: i32 = 7;
pub const CPU_EXCEPTION_DF: i32 = 8;
pub const CPU_EXCEPTION_TS: i32 = 10;
pub const CPU_EXCEPTION_NP: i32 = 11;
pub const CPU_EXCEPTION_SS: i32 = 12;
pub const CPU_EXCEPTION_GP: i32 = 13;
pub const CPU_EXCEPTION_PF: i32 = 14;
pub const CPU_EXCEPTION_MF: i32 = 16;
pub const CPU_EXCEPTION_AC: i32 = 17;
pub const CPU_EXCEPTION_MC: i32 = 18;
pub const CPU_EXCEPTION_XM: i32 = 19;
pub const CPU_EXCEPTION_VE: i32 = 20;
pub const CHECK_TLB_INVARIANTS: bool = false;
pub const DEBUG: bool = cfg!(debug_assertions);
pub const LOOP_COUNTER: i32 = 20011;
pub const TSC_RATE: f64 = 1_000_000.0;
pub static mut jit_block_boundary: bool = false;
pub static mut must_not_fault: bool = false;
pub static mut rdtsc_imprecision_offset: u64 = 0;
pub static mut rdtsc_last_value: u64 = 0;
pub static mut tsc_offset: u64 = 0;
pub static mut valid_tlb_entries: [i32; 10000] = [0; 10000];
pub static mut valid_tlb_entries_count: i32 = 0;
pub static mut apic_enabled: bool = false;
pub static mut in_jit: bool = false;
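// Debug bookkeeping: records how control last arrived at the current code
// (interrupt, JIT-compiled code, or the interpreter).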
pub enum LastJump {
    Interrupt {
        phys_addr: u32,
        int: u8,
        software: bool,
        error: Option<u32>,
    },
    Compiled {
        phys_addr: u32,
    },
    Interpreted {
        phys_addr: u32,
    },
    None,
}
impl LastJump {
    pub fn phys_address(&self) -> Option<u32> {
        match self {
            LastJump::Interrupt { phys_addr, .. } => Some(*phys_addr),
            LastJump::Compiled { phys_addr } => Some(*phys_addr),
            LastJump::Interpreted { phys_addr } => Some(*phys_addr),
            LastJump::None => None,
        }
    }
    pub fn name(&self) -> &'static str {
        match self {
            LastJump::Interrupt { .. } => "interrupt",
            LastJump::Compiled { .. } => "compiled",
            LastJump::Interpreted { .. } => "interpreted",
            LastJump::None => "none",
        }
    }
}
pub static mut debug_last_jump: LastJump = LastJump::None;
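// A 16-bit segment selector: requested privilege level (RPL) in bits 0..1, table indicator
// (GDT/LDT) in bit 2, descriptor table offset in bits 3..15.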
pub struct SegmentSelector {
    raw: u16,
}
impl SegmentSelector {
    pub fn of_u16(raw: u16) -> SegmentSelector { SegmentSelector { raw } }
    pub fn rpl(&self) -> u8 { (self.raw & 3) as u8 }
    pub fn is_gdt(&self) -> bool { (self.raw & 4) == 0 }
    pub fn descriptor_offset(&self) -> u16 { (self.raw & !7) as u16 }
    pub fn is_null(&self) -> bool { self.is_gdt() && self.descriptor_offset() == 0 }
}
// Used to indicate early that the selector cannot be used to fetch a descriptor
#[derive(PartialEq)]
pub enum SelectorNullOrInvalid {
    IsNull,
    IsInvalid,
}
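// A 64-bit descriptor-table entry (GDT/LDT) describing a code, data, or system segment.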
pub struct SegmentDescriptor {
    raw: u64,
}
impl SegmentDescriptor {
    pub fn of_u64(raw: u64) -> SegmentDescriptor { SegmentDescriptor { raw } }
    pub fn base(&self) -> i32 {
        ((self.raw >> 16) & 0xffff | (self.raw & 0xff_00000000) >> 16 | (self.raw >> 56 << 24))
            as i32
    }
    pub fn limit(&self) -> u32 { (self.raw & 0xffff | ((self.raw >> 48) & 0xf) << 16) as u32 }
    pub fn access_byte(&self) -> u8 { ((self.raw >> 40) & 0xff) as u8 }
    pub fn flags(&self) -> u8 { ((self.raw >> 48 >> 4) & 0xf) as u8 }
    pub fn is_system(&self) -> bool { self.access_byte() & 0x10 == 0 }
    pub fn system_type(&self) -> u8 { self.access_byte() & 0xF }
    pub fn is_rw(&self) -> bool { self.access_byte() & 2 == 2 }
    pub fn is_dc(&self) -> bool { self.access_byte() & 4 == 4 }
    pub fn is_executable(&self) -> bool { self.access_byte() & 8 == 8 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 == 0x80 }
    pub fn is_writable(&self) -> bool { self.is_rw() && !self.is_executable() }
    pub fn is_readable(&self) -> bool { self.is_rw() || !self.is_executable() }
    pub fn is_conforming_executable(&self) -> bool { self.is_dc() && self.is_executable() }
    pub fn dpl(&self) -> u8 { (self.access_byte() >> 5) & 3 }
    pub fn is_32(&self) -> bool { self.flags() & 4 == 4 }
    pub fn effective_limit(&self) -> u32 {
        if self.flags() & 8 == 8 { self.limit() << 12 | 0xFFF } else { self.limit() }
    }
}
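// A 64-bit IDT entry (task, interrupt, or trap gate).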
pub struct InterruptDescriptor {
    raw: u64,
}
impl InterruptDescriptor {
    pub fn of_u64(raw: u64) -> InterruptDescriptor { InterruptDescriptor { raw } }
    pub fn offset(&self) -> i32 { (self.raw & 0xffff | self.raw >> 32 & 0xffff0000) as i32 }
    pub fn selector(&self) -> u16 { (self.raw >> 16 & 0xffff) as u16 }
    pub fn access_byte(&self) -> u8 { (self.raw >> 40 & 0xff) as u8 }
    pub fn dpl(&self) -> u8 { (self.access_byte() >> 5 & 3) as u8 }
    pub fn gate_type(&self) -> u8 { self.access_byte() & 7 }
    pub fn is_32(&self) -> bool { self.access_byte() & 8 == 8 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 == 0x80 }
    pub fn reserved_zeros_are_valid(&self) -> bool { self.access_byte() & 16 == 0 }
    const TASK_GATE: u8 = 0b101;
    const INTERRUPT_GATE: u8 = 0b110;
    const TRAP_GATE: u8 = 0b111;
}
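// Loads CS with `selector` in real or vm86 mode, where the segment base is simply selector << 4.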
#[no_mangle]
pub unsafe fn switch_cs_real_mode(selector: i32) {
    dbg_assert!(!*protected_mode || vm86_mode());
    *sreg.offset(CS as isize) = selector as u16;
    *segment_is_null.offset(CS as isize) = false;
    *segment_offsets.offset(CS as isize) = selector << 4;
    update_cs_size(false);
}
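// Returns the translated address of the stack-pointer entry (esp, followed by ss) for privilege
// level `dpl` in the current TSS, using the 32- or 16-bit TSS layout depending on tss_size_32.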
pub unsafe fn get_tss_stack_addr(dpl: u8) -> OrPageFault<u32> {
    let (tss_stack_offset, page_boundary) = if *tss_size_32 {
        (((dpl << 3) + 4) as u32, 0x1000 - 6)
    }
    else {
        (((dpl << 2) + 2) as u32, 0x1000 - 4)
    };
    if tss_stack_offset + 5 > *segment_limits.offset(TR as isize) {
        panic!("#TS handler");
    }
    let tss_stack_addr = *segment_offsets.offset(TR as isize) as u32 + tss_stack_offset;
    dbg_assert!(tss_stack_addr & 0xFFF <= page_boundary);
    Ok(translate_address_system_read(tss_stack_addr as i32)?)
}
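// IRET: return from an interrupt or exception handler. Handles real mode, vm86 mode (including
// the return *to* vm86 when the popped flags have VM set), and protected-mode returns to the
// same or an outer privilege level.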
pub unsafe fn iret16() { iret(true); }
pub unsafe fn iret32() { iret(false); }
pub unsafe fn iret(is_16: bool) {
    if vm86_mode() && getiopl() < 3 {
        // vm86 mode, iopl != 3
        dbg_log!("#gp iret vm86 mode, iopl != 3");
        trigger_gp(0);
        return;
    }
    let (new_eip, new_cs, mut new_flags) = if is_16 {
        (
            return_on_pagefault!(safe_read16(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(2))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
        )
    }
    else {
        (
            return_on_pagefault!(safe_read32s(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
            return_on_pagefault!(safe_read32s(get_stack_pointer(8))),
        )
    };
    if !*protected_mode || (vm86_mode() && getiopl() == 3) {
        if new_eip as u32 & 0xFFFF0000 != 0 {
            panic!("#GP handler");
        }
        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_eip;
        if is_16 {
            update_eflags(new_flags | *flags & !0xFFFF);
            adjust_stack_reg(3 * 2);
        }
        else {
            if !*protected_mode {
                update_eflags((new_flags & 0x257FD5) | (*flags & 0x1A0000));
            }
            else {
                update_eflags(new_flags);
            }
            adjust_stack_reg(3 * 4);
        }
        handle_irqs();
        return;
    }
    dbg_assert!(!vm86_mode());
    if *flags & FLAG_NT != 0 {
        if DEBUG {
            panic!("NT");
        }
        trigger_gp(0);
        return;
    }
    if new_flags & FLAG_VM != 0 {
        if *cpl == 0 {
            // return to virtual 8086 mode
            // vm86 cannot be set in 16 bit flag
            dbg_assert!(!is_16);
            dbg_assert!((new_eip & !0xFFFF) == 0);
            let temp_esp = return_on_pagefault!(safe_read32s(get_stack_pointer(12)));
            let temp_ss = return_on_pagefault!(safe_read16(get_stack_pointer(16)));
            let new_es = return_on_pagefault!(safe_read16(get_stack_pointer(20)));
            let new_ds = return_on_pagefault!(safe_read16(get_stack_pointer(24)));
            let new_fs = return_on_pagefault!(safe_read16(get_stack_pointer(28)));
            let new_gs = return_on_pagefault!(safe_read16(get_stack_pointer(32)));
            // no exceptions below
            update_eflags(new_flags);
            *flags |= FLAG_VM;
            switch_cs_real_mode(new_cs);
            *instruction_pointer = get_seg_cs() + (new_eip & 0xFFFF);
            if !switch_seg(ES, new_es)
                || !switch_seg(DS, new_ds)
                || !switch_seg(FS, new_fs)
                || !switch_seg(GS, new_gs)
            {
                // XXX: Should be checked before side effects
                dbg_assert!(false);
            }
            adjust_stack_reg(9 * 4); // 9 dwords: eip, cs, flags, esp, ss, es, ds, fs, gs
            write_reg32(ESP, temp_esp);
            if !switch_seg(SS, temp_ss) {
                // XXX
                dbg_assert!(false);
            }
            *cpl = 3;
            cpl_changed();
            update_cs_size(false);
            // iret end
            return;
        }
        else {
            dbg_log!("vm86 flag ignored because cpl != 0");
            new_flags &= !FLAG_VM;
        }
    }
    // protected mode return
    let (cs_descriptor, cs_selector) = match return_on_pagefault!(lookup_segment_selector(new_cs)) {
        Ok((desc, sel)) => (desc, sel),
        Err(selector_unusable) => match selector_unusable {
            SelectorNullOrInvalid::IsNull => {
                panic!("Unimplemented: CS selector is null");
            },
            SelectorNullOrInvalid::IsInvalid => {
                panic!("Unimplemented: CS selector is invalid");
            },
        },
    };
    dbg_assert!(new_eip as u32 <= cs_descriptor.effective_limit());
    if !cs_descriptor.is_present() {
        panic!("not present");
    }
    if !cs_descriptor.is_executable() {
        panic!("not exec");
    }
    if cs_selector.rpl() < *cpl {
        panic!("rpl < cpl");
    }
    if cs_descriptor.is_dc() && cs_descriptor.dpl() > cs_selector.rpl() {
        panic!("conforming and dpl > rpl");
    }
    if !cs_descriptor.is_dc() && cs_selector.rpl() != cs_descriptor.dpl() {
        dbg_log!(
            "#gp iret: non-conforming cs and rpl != dpl, dpl={} rpl={}",
            cs_descriptor.dpl(),
            cs_selector.rpl()
        );
        trigger_gp(new_cs & !3);
        return;
    }
    if cs_selector.rpl() > *cpl {
        // outer privilege return
        let (temp_esp, temp_ss) = if is_16 {
            (
                return_on_pagefault!(safe_read16(get_stack_pointer(6))),
                return_on_pagefault!(safe_read16(get_stack_pointer(8))),
            )
        }
        else {
            (
                return_on_pagefault!(safe_read32s(get_stack_pointer(12))),
                return_on_pagefault!(safe_read16(get_stack_pointer(16))),
            )
        };
        let (ss_descriptor, ss_selector) =
            match return_on_pagefault!(lookup_segment_selector(temp_ss)) {
                Ok((desc, sel)) => (desc, sel),
                Err(selector_unusable) => match selector_unusable {
                    SelectorNullOrInvalid::IsNull => {
                        dbg_log!("#GP for loading 0 in SS sel={:x}", temp_ss);
                        dbg_trace();
                        trigger_gp(0);
                        return;
                    },
                    SelectorNullOrInvalid::IsInvalid => {
                        dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
                        trigger_gp(temp_ss & !3);
                        return;
                    },
                },
            };
        let new_cpl = cs_selector.rpl();
        if ss_descriptor.is_system()
            || ss_selector.rpl() != new_cpl
            || !ss_descriptor.is_writable()
            || ss_descriptor.dpl() != new_cpl
        {
            dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_gp(temp_ss & !3);
            return;
        }
        if !ss_descriptor.is_present() {
            dbg_log!("#SS for loading non-present in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_ss(temp_ss & !3);
            return;
        }
        // no exceptions below
        if is_16 {
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            update_eflags(new_flags);
        }
        *cpl = cs_selector.rpl();
        cpl_changed();
        if !switch_seg(SS, temp_ss) {
            // XXX
            dbg_assert!(false);
        }
        set_stack_reg(temp_esp);
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }
        // XXX: Set segment to 0 if it's not usable in the new cpl
        // XXX: Use cached segment information
        // ...
    }
    else if cs_selector.rpl() == *cpl {
        // same privilege return
        // no exceptions below
        if is_16 {
            adjust_stack_reg(3 * 2);
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            adjust_stack_reg(3 * 4);
            update_eflags(new_flags);
        }
        // update vip and vif, which are not changed by update_eflags
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }
    }
    else {
        dbg_assert!(false);
    }
    *sreg.offset(CS as isize) = new_cs as u16;
    dbg_assert!((new_cs & 3) == *cpl as i32);
    update_cs_size(cs_descriptor.is_32());
    *segment_limits.offset(CS as isize) = cs_descriptor.effective_limit();
    *segment_offsets.offset(CS as isize) = cs_descriptor.base();
    *instruction_pointer = new_eip + get_seg_cs();
    // iret end
    handle_irqs();
}
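// Dispatches interrupt `interrupt_nr` (hardware, software, or CPU exception with an optional
// error code): via the IDT in protected mode (task, interrupt, and trap gates, including
// privilege-level changes and vm86 handling) or via the 4-byte IVT entries in real mode.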
pub unsafe fn call_interrupt_vector(
    interrupt_nr: i32,
    is_software_int: bool,
    error_code: Option<i32>,
) {
    // we have to leave hlt_loop at some point, this is a
    // good place to do it
    *in_hlt = false;
    if *protected_mode {
        if vm86_mode() && *cr.offset(4) & CR4_VME != 0 {
            panic!("Unimplemented: VME");
        }
        if vm86_mode() && is_software_int && getiopl() < 3 {
            dbg_log!("call_interrupt_vector #GP. vm86 && software int && iopl < 3");
            dbg_trace();
            trigger_gp(0);
            return;
        }
        if interrupt_nr << 3 | 7 > *idtr_size {
            dbg_log!("interrupt_nr={:x} idtr_size={:x}", interrupt_nr, *idtr_size);
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }
        let descriptor_address = return_on_pagefault!(translate_address_system_read(
            *idtr_offset + (interrupt_nr << 3)
        ));
        let descriptor = InterruptDescriptor::of_u64(read64s(descriptor_address) as u64);
        let mut offset = descriptor.offset();
        let selector = descriptor.selector() as i32;
        let dpl = descriptor.dpl();
        let gate_type = descriptor.gate_type();
        if !descriptor.is_present() {
            // present bit not set
            panic!("Unimplemented: #NP handler");
        }
        if is_software_int && dpl < *cpl {
            dbg_log!("#gp software interrupt ({:x}) and dpl < cpl", interrupt_nr);
            dbg_trace();
            trigger_gp(interrupt_nr << 3 | 2);
            return;
        }
        if gate_type == InterruptDescriptor::TASK_GATE {
            // task gate
            dbg_log!(
                "interrupt to task gate: int={:x} sel={:x} dpl={}",
                interrupt_nr,
                selector,
                dpl
            );
            dbg_trace();
            do_task_switch(selector, error_code.is_some(), error_code.unwrap_or(0));
            return;
        }
        let is_valid_type = gate_type == InterruptDescriptor::TRAP_GATE
            || gate_type == InterruptDescriptor::INTERRUPT_GATE;
        if !is_valid_type || !descriptor.reserved_zeros_are_valid() {
            // invalid gate_type
            dbg_log!(
                "gate type invalid or reserved 0s violated. gate_type=0b{:b} raw={:b}",
                gate_type,
                descriptor.raw
            );
            dbg_log!(
                "addr={:x} offset={:x} selector={:x}",
                descriptor_address,
                offset,
                selector
            );
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }
        let cs_segment_descriptor = match return_on_pagefault!(lookup_segment_selector(selector)) {
            Ok((desc, _)) => desc,
            Err(selector_unusable) => match selector_unusable {
                SelectorNullOrInvalid::IsNull => {
                    dbg_log!("is null");
                    panic!("Unimplemented: #GP handler");
                },
                SelectorNullOrInvalid::IsInvalid => {
                    dbg_log!("is invalid");
                    panic!("Unimplemented: #GP handler (error code)");
                },
            },
        };
        dbg_assert!(offset as u32 <= cs_segment_descriptor.effective_limit());
        if !cs_segment_descriptor.is_executable() || cs_segment_descriptor.dpl() > *cpl {
            dbg_log!("not exec");
            panic!("Unimplemented: #GP handler");
        }
        if !cs_segment_descriptor.is_present() {
            // kvm-unit-test
            dbg_log!("not present");
            trigger_np(interrupt_nr << 3 | 2);
            return;
        }
        let old_flags = get_eflags();
        if !cs_segment_descriptor.is_dc() && cs_segment_descriptor.dpl() < *cpl {
            // inter privilege level interrupt
            // interrupt from vm86 mode
            if old_flags & FLAG_VM != 0 && cs_segment_descriptor.dpl() != 0 {
                panic!("Unimplemented: #GP handler for non-0 cs segment dpl when in vm86 mode");
            }
            let tss_stack_addr =
                return_on_pagefault!(get_tss_stack_addr(cs_segment_descriptor.dpl()));
            let new_esp = read32s(tss_stack_addr);
            let new_ss = read16(tss_stack_addr + if *tss_size_32 { 4 } else { 2 });
            let (ss_segment_descriptor, ss_segment_selector) =
                match return_on_pagefault!(lookup_segment_selector(new_ss)) {
                    Ok((desc, sel)) => (desc, sel),
                    Err(_) => {
                        panic!("Unimplemented: #TS handler");
                    },
                };
            // Disabled: Incorrect handling of direction bit
            // See http://css.csail.mit.edu/6.858/2014/readings/i386/s06_03.htm
            //if !((new_esp >>> 0) <= ss_segment_descriptor.effective_limit())
            //    debugger;
            //dbg_assert!((new_esp >>> 0) <= ss_segment_descriptor.effective_limit());
            dbg_assert!(!ss_segment_descriptor.is_system() && ss_segment_descriptor.is_writable());
            if ss_segment_selector.rpl() != cs_segment_descriptor.dpl() {
                panic!("Unimplemented: #TS handler");
            }
            if ss_segment_descriptor.dpl() != cs_segment_descriptor.dpl()
                || !ss_segment_descriptor.is_rw()
            {
                panic!("Unimplemented: #TS handler");
            }
            if !ss_segment_descriptor.is_present() {
                panic!("Unimplemented: #TS handler");
            }
            let old_esp = read_reg32(ESP);
            let old_ss = *sreg.offset(SS as isize) as i32;
            let error_code_space = if error_code.is_some() { 1 } else { 0 };
            let vm86_space = if (old_flags & FLAG_VM) == FLAG_VM { 4 } else { 0 };
            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };
            let stack_space = bytes_per_arg * (5 + error_code_space + vm86_space);
            let new_stack_pointer = ss_segment_descriptor.base()
                + if ss_segment_descriptor.is_32() {
                    new_esp - stack_space
                }
                else {
                    new_esp - stack_space & 0xFFFF
                };
            return_on_pagefault!(translate_address_system_write(new_stack_pointer));
            return_on_pagefault!(translate_address_system_write(
                ss_segment_descriptor.base() + new_esp - 1
            ));
            // no exceptions below
            *cpl = cs_segment_descriptor.dpl();
            cpl_changed();
            update_cs_size(cs_segment_descriptor.is_32());
            *flags &= !FLAG_VM & !FLAG_RF;
            if !switch_seg(SS, new_ss) {
                // XXX
                dbg_assert!(false);
            }
            set_stack_reg(new_esp);
            // XXX: #SS if stack would cross stack limit
            if old_flags & FLAG_VM != 0 {
                if !descriptor.is_32() {
                    dbg_assert!(false);
                }
                else {
                    push32(*sreg.offset(GS as isize) as i32).unwrap();
                    push32(*sreg.offset(FS as isize) as i32).unwrap();
                    push32(*sreg.offset(DS as isize) as i32).unwrap();
                    push32(*sreg.offset(ES as isize) as i32).unwrap();
                }
            }
            if descriptor.is_32() {
                push32(old_ss).unwrap();
                push32(old_esp).unwrap();
            }
            else {
                push16(old_ss).unwrap();
                push16(old_esp).unwrap();
            }
        }
        else if cs_segment_descriptor.is_dc() || cs_segment_descriptor.dpl() == *cpl {
            // intra privilege level interrupt
            //dbg_log!("Intra privilege interrupt gate=" + h(selector, 4) + ":" + h(offset >>> 0, 8) +
            //    " gate_type=" + gate_type + " 16bit=" + descriptor.is_32() +
            //    " cpl=" + *cpl + " dpl=" + segment_descriptor.dpl() + " conforming=" + +segment_descriptor.is_dc(), );
            //debug.dump_regs_short();
            if *flags & FLAG_VM != 0 {
                dbg_assert!(false, "check error code");
                trigger_gp(selector & !3);
                return;
            }
            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };
            let error_code_space = if error_code.is_some() { 1 } else { 0 };
            let stack_space = bytes_per_arg * (3 + error_code_space);
            // XXX: with current cpl or with cpl 0?
            return_on_pagefault!(writable_or_pagefault(
                get_stack_pointer(-stack_space),
                stack_space
            ));
            // no exceptions below
        }
        else {
            panic!("Unimplemented: #GP handler");
        }
        // XXX: #SS if stack would cross stack limit
        if descriptor.is_32() {
            push32(old_flags).unwrap();
            push32(*sreg.offset(CS as isize) as i32).unwrap();
            push32(get_real_eip()).unwrap();
            if let Some(ec) = error_code {
                push32(ec).unwrap();
            }
        }
        else {
            push16(old_flags).unwrap();
            push16(*sreg.offset(CS as isize) as i32).unwrap();
            push16(get_real_eip()).unwrap();
            if let Some(ec) = error_code {
                push16(ec).unwrap();
            }
            offset &= 0xFFFF;
        }
        if old_flags & FLAG_VM != 0 {
            if !switch_seg(GS, 0) || !switch_seg(FS, 0) || !switch_seg(DS, 0) || !switch_seg(ES, 0)
            {
                // can't fail
                dbg_assert!(false);
            }
        }
        *sreg.offset(CS as isize) = (selector as u16) & !3 | *cpl as u16;
        dbg_assert!((*sreg.offset(CS as isize) & 3) == *cpl as u16);
        update_cs_size(cs_segment_descriptor.is_32());
        *segment_limits.offset(CS as isize) = cs_segment_descriptor.effective_limit();
        *segment_offsets.offset(CS as isize) = cs_segment_descriptor.base();
        *instruction_pointer = get_seg_cs() + offset;
        *flags &= !FLAG_NT & !FLAG_VM & !FLAG_RF & !FLAG_TRAP;
        if gate_type == InterruptDescriptor::INTERRUPT_GATE {
            // clear int flag for interrupt gates
            *flags &= !FLAG_INTERRUPT;
        }
        else {
            if *flags & FLAG_INTERRUPT != 0 && old_flags & FLAG_INTERRUPT == 0 {
                handle_irqs();
            }
        }
    }
    else {
        // call 4 byte cs:ip interrupt vector from ivt at cpu.memory 0
        let index = (interrupt_nr << 2) as u32;
        let new_ip = read16(index);
        let new_cs = read16(index + 2);
        dbg_assert!(
            index | 3 <= IVT_SIZE,
            "Unimplemented: #GP for interrupt number out of IVT bounds"
        );
        // XXX: #SS if stack would cross stack limit
        // push flags, cs:ip
        push16(get_eflags()).unwrap();
        push16(*sreg.offset(CS as isize) as i32).unwrap();
        push16(get_real_eip()).unwrap();
        *flags &= !FLAG_INTERRUPT & !FLAG_AC & !FLAG_TRAP;
        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_ip;
    }
}
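// Far JMP/CALL to selector:eip. Handles real/vm86 mode, call gates (including a stack switch to
// an inner privilege level), and direct transfers to conforming or non-conforming code segments.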
pub unsafe fn far_jump(eip: i32, selector: i32, is_call: bool, is_osize_32: bool) {
    dbg_assert!(selector < 0x10000 && selector >= 0);
    //dbg_log("far " + ["jump", "call"][+is_call] + " eip=" + h(eip >>> 0, 8) + " cs=" + h(selector, 4), LOG_CPU);
    //CPU_LOG_VERBOSE && this.debug.dump_state("far " + ["jump", "call"][+is_call]);
    if !*protected_mode || vm86_mode() {
        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }
        switch_cs_real_mode(selector);
        *instruction_pointer = get_seg_cs() + eip;
        return;
    }
    let (info, cs_selector) = match return_on_pagefault!(lookup_segment_selector(selector)) {
        Ok((desc, sel)) => (desc, sel),
        Err(selector_unusable) => match selector_unusable {
            SelectorNullOrInvalid::IsNull => {
                dbg_log!("#gp null cs");
                trigger_gp(0);
                return;
            },
            SelectorNullOrInvalid::IsInvalid => {
                dbg_log!("#gp invalid cs: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            },
        },
    };
    if info.is_system() {
        dbg_assert!(is_call, "TODO: Jump");
        dbg_log!("system type cs: {:x}", selector);
        if info.system_type() == 0xC || info.system_type() == 4 {
            // call gate
            let is_16 = info.system_type() == 4;
            if info.dpl() < *cpl || info.dpl() < cs_selector.rpl() {
                dbg_log!("#gp cs gate dpl < cpl or dpl < rpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
            if !info.is_present() {
                dbg_log!("#NP for loading not-present in gate cs sel={:x}", selector);
                trigger_np(selector & !3);
                return;
            }
            let cs_selector = (info.raw >> 16) as i32;
            let (cs_info, _) = match return_on_pagefault!(lookup_segment_selector(cs_selector)) {
                Ok((desc, sel)) => (desc, sel),
                Err(selector_unusable) => match selector_unusable {
                    SelectorNullOrInvalid::IsNull => {
                        dbg_log!("#gp null cs");
                        trigger_gp(0);
                        return;
                    },
                    SelectorNullOrInvalid::IsInvalid => {
                        dbg_log!("#gp invalid cs: {:x}", selector);
                        trigger_gp(selector & !3);
                        return;
                    },
                },
            };
            if !cs_info.is_executable() {
                dbg_log!("#gp non-executable cs: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }
            if cs_info.dpl() > *cpl {
                dbg_log!("#gp dpl > cpl: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }
            if !cs_info.is_present() {
                dbg_log!("#NP for loading not-present in cs sel={:x}", cs_selector);
                trigger_np(cs_selector & !3);
                return;
            }
            if !cs_info.is_dc() && cs_info.dpl() < *cpl {
                dbg_log!(
                    "more privilege call gate is_16={} from={} to={}",
                    is_16,
                    *cpl,
                    cs_info.dpl()
                );
                let tss_stack_addr = return_on_pagefault!(get_tss_stack_addr(cs_info.dpl()));
                let new_esp;
                let new_ss;
                if *tss_size_32 {
                    new_esp = read32s(tss_stack_addr);
                    new_ss = read16(tss_stack_addr + 4);
                }
                else {
                    new_esp = read16(tss_stack_addr);
                    new_ss = read16(tss_stack_addr + 2);
                }
                let (ss_info, ss_selector) =
                    match return_on_pagefault!(lookup_segment_selector(new_ss)) {
                        Ok((desc, sel)) => (desc, sel),
                        Err(selector_unusable) => match selector_unusable {
                            SelectorNullOrInvalid::IsNull => {
                                panic!("null ss: {}", new_ss);
                            },
                            SelectorNullOrInvalid::IsInvalid => {
                                panic!("invalid ss: {}", new_ss);
                            },
                        },
                    };
                // Disabled: Incorrect handling of direction bit
                // See http://css.csail.mit.edu/6.858/2014/readings/i386/s06_03.htm
                //if(!((new_esp >>> 0) <= ss_info.effective_limit))
                //    debugger;
                //dbg_assert!((new_esp >>> 0) <= ss_info.effective_limit);
                dbg_assert!(!ss_info.is_system() && ss_info.is_writable());
                if ss_selector.rpl() != cs_info.dpl()
                // xxx: 0 in v86 mode
                {
                    panic!("#TS handler");
                }
                if ss_info.dpl() != cs_info.dpl() || !ss_info.is_writable() {
                    panic!("#TS handler");
                }
                if !ss_info.is_present() {
                    panic!("#SS handler");
                }
                let parameter_count = (info.raw >> 32 & 0x1F) as i32;
                let mut stack_space = if is_16 { 4 } else { 8 };
                if is_call {
                    stack_space +=
                        if is_16 { 4 + 2 * parameter_count } else { 8 + 4 * parameter_count };
                }
                if ss_info.is_32() {
                    return_on_pagefault!(writable_or_pagefault(
                        ss_info.base() + new_esp - stack_space,
                        stack_space
                    )); // , cs_info.dpl
                }
                else {
                    return_on_pagefault!(writable_or_pagefault(
                        ss_info.base() + (new_esp - stack_space & 0xFFFF),
                        stack_space
                    )); // , cs_info.dpl
                }
                let old_esp = read_reg32(ESP);
                let old_ss = *sreg.offset(SS as isize);
                let old_stack_pointer = get_stack_pointer(0);
                //dbg_log!("old_esp=" + h(old_esp));
                *cpl = cs_info.dpl();
                cpl_changed();
                update_cs_size(cs_info.is_32());
                // XXX: Should be checked before side effects
                if !switch_seg(SS, new_ss) {
                    dbg_assert!(false)
                };
                set_stack_reg(new_esp);
                //dbg_log!("parameter_count=" + parameter_count);
                //dbg_assert!(parameter_count == 0, "TODO");
                if is_16 {
                    push16(old_ss as i32).unwrap();
                    push16(old_esp).unwrap();
                }
                else {
                    push32(old_ss as i32).unwrap();
                    push32(old_esp).unwrap();
                }
                if is_call {
                    if is_16 {
                        for i in (0..parameter_count).rev() {
                            //for(let i = parameter_count - 1; i >= 0; i--)
                            let parameter = safe_read16(old_stack_pointer + 2 * i).unwrap();
                            push16(parameter).unwrap();
                        }
                        //writable_or_pagefault(get_stack_pointer(-4), 4);
                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        for i in (0..parameter_count).rev() {
                            //for(let i = parameter_count - 1; i >= 0; i--)
                            let parameter = safe_read32s(old_stack_pointer + 4 * i).unwrap();
                            push32(parameter).unwrap();
                        }
                        //writable_or_pagefault(get_stack_pointer(-8), 8);
                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }
            }
            else {
                dbg_log!(
                    "same privilege call gate is_16={} from={} to={} conforming={}",
                    is_16,
                    *cpl,
                    cs_info.dpl(),
                    cs_info.is_dc()
                );
                // ok
                if is_call {
                    if is_16 {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }
            }
            // Note: eip from call is ignored
            let mut new_eip = (info.raw & 0xFFFF) as i32;
            if !is_16 {
                new_eip |= ((info.raw >> 32) & 0xFFFF0000) as i32;
            }
            dbg_log!(
                "call gate eip={:x} cs={:x} conforming={}",
                new_eip as u32,
                cs_selector,
                cs_info.is_dc()
            );
            dbg_assert!((new_eip as u32) <= cs_info.effective_limit(), "todo: #gp");
            update_cs_size(cs_info.is_32());
            *segment_is_null.offset(CS as isize) = false;
            *segment_limits.offset(CS as isize) = cs_info.effective_limit();
            *segment_offsets.offset(CS as isize) = cs_info.base();
            *sreg.offset(CS as isize) = cs_selector as u16 & !3 | *cpl as u16;
            dbg_assert!(*sreg.offset(CS as isize) & 3 == *cpl as u16);
            *instruction_pointer = get_seg_cs() + new_eip;
        }
        else {
            dbg_assert!(false);
            //let types = { 9: "Available 386 TSS", 0xb: "Busy 386 TSS", 4: "286 Call Gate", 0xc: "386 Call Gate" };
            //throw debug.unimpl("load system segment descriptor, type = " + (info.access & 15) + " (" + types[info.access & 15] + ")");
        }
    }
    else {
        if !info.is_executable() {
            dbg_log!("#gp non-executable cs: {:x}", selector);
            trigger_gp(selector & !3);
            return;
        }
        if info.is_dc() {
            // conforming code segment
            if info.dpl() > *cpl {
                dbg_log!("#gp cs dpl > cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }
        else {
            // non-conforming code segment
            if cs_selector.rpl() > *cpl || info.dpl() != *cpl {
                dbg_log!("#gp cs rpl > cpl or dpl != cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }
        if !info.is_present() {
            dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
            dbg_trace();
            trigger_np(selector & !3);
            return;
        }
        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }
        dbg_assert!((eip as u32) <= info.effective_limit(), "todo: #gp");
        update_cs_size(info.is_32());
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = info.effective_limit();
        *segment_offsets.offset(CS as isize) = info.base();
        *sreg.offset(CS as isize) = selector as u16 & !3 | *cpl as u16;
        *instruction_pointer = get_seg_cs() + eip;
    }
    //dbg_log!("far " + ["jump", "call"][+is_call] + " to:", LOG_CPU)
    //CPU_LOG_VERBOSE && debug.dump_state("far " + ["jump", "call"][+is_call] + " end");
}
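// Far return (RETF) to selector:eip, removing the popped cs/eip plus `stack_adjust` extra bytes
// from the stack. Handles real/vm86 mode and protected-mode returns to the same or an outer
// privilege level.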
  1142. pub unsafe fn far_return(eip: i32, selector: i32, stack_adjust: i32, is_osize_32: bool) {
  1143. dbg_assert!(selector < 0x10000 && selector >= 0);
  1144. //dbg_log("far return eip=" + h(eip >>> 0, 8) + " cs=" + h(selector, 4) + " stack_adjust=" + h(stack_adjust), LOG_CPU);
  1145. //CPU_LOG_VERBOSE && this.debug.dump_state("far ret start");
  1146. if !*protected_mode {
  1147. dbg_assert!(!*is_32);
  1148. //dbg_assert(!this.stack_size_32[0]);
  1149. }
  1150. if !*protected_mode || vm86_mode() {
  1151. switch_cs_real_mode(selector);
  1152. *instruction_pointer = get_seg_cs() + eip;
  1153. adjust_stack_reg(2 * (if is_osize_32 { 4 } else { 2 }) + stack_adjust);
  1154. return;
  1155. }
  1156. let (info, cs_selector) = match return_on_pagefault!(lookup_segment_selector(selector)) {
  1157. Ok((desc, sel)) => (desc, sel),
  1158. Err(selector_unusable) => match selector_unusable {
  1159. SelectorNullOrInvalid::IsNull => {
  1160. dbg_log!("far return: #gp null cs");
  1161. trigger_gp(0);
  1162. return;
  1163. },
  1164. SelectorNullOrInvalid::IsInvalid => {
  1165. dbg_log!("far return: #gp invalid cs: {:x}", selector);
  1166. trigger_gp(selector & !3);
  1167. return;
  1168. },
  1169. },
  1170. };
  1171. if info.is_system() {
  1172. dbg_assert!(false, "is system in far return");
  1173. trigger_gp(selector & !3);
  1174. return;
  1175. }
  1176. if !info.is_executable() {
  1177. dbg_log!("non-executable cs: {:x}", selector);
  1178. trigger_gp(selector & !3);
  1179. return;
  1180. }
  1181. if cs_selector.rpl() < *cpl {
  1182. dbg_log!("cs rpl < cpl: {:x}", selector);
  1183. trigger_gp(selector & !3);
  1184. return;
  1185. }
  1186. if info.is_dc() && info.dpl() > cs_selector.rpl() {
  1187. dbg_log!("cs conforming and dpl > rpl: {:x}", selector);
  1188. trigger_gp(selector & !3);
  1189. return;
  1190. }
  1191. if !info.is_dc() && info.dpl() != cs_selector.rpl() {
  1192. dbg_log!("cs non-conforming and dpl != rpl: {:x}", selector);
  1193. trigger_gp(selector & !3);
  1194. return;
  1195. }
  1196. if !info.is_present() {
  1197. dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
  1198. dbg_trace();
  1199. trigger_np(selector & !3);
  1200. return;
  1201. }
  1202. if cs_selector.rpl() > *cpl {
  1203. dbg_log!(
  1204. "far return privilege change cs: {:x} from={} to={} is_16={}",
  1205. selector,
  1206. *cpl,
  1207. cs_selector.rpl(),
  1208. is_osize_32
  1209. );
  1210. let temp_esp;
  1211. let temp_ss;
  1212. if is_osize_32 {
  1213. //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 8))))
  1214. temp_esp = safe_read32s(get_stack_pointer(stack_adjust + 8)).unwrap();
  1215. //dbg_log!("esp=" + h(temp_esp));
  1216. temp_ss = safe_read16(get_stack_pointer(stack_adjust + 12)).unwrap();
  1217. }
  1218. else {
  1219. //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 4))));
  1220. temp_esp = safe_read16(get_stack_pointer(stack_adjust + 4)).unwrap();
  1221. //dbg_log!("esp=" + h(temp_esp));
  1222. temp_ss = safe_read16(get_stack_pointer(stack_adjust + 6)).unwrap();
  1223. }
  1224. *cpl = cs_selector.rpl();
  1225. cpl_changed();
  1226. // XXX: This failure should be checked before side effects
  1227. if !switch_seg(SS, temp_ss) {
  1228. dbg_assert!(false);
  1229. }
  1230. set_stack_reg(temp_esp + stack_adjust);
  1231. //if(is_osize_32)
  1232. //{
  1233. // adjust_stack_reg(2 * 4);
  1234. //}
  1235. //else
  1236. //{
  1237. // adjust_stack_reg(2 * 2);
  1238. //}
  1239. //throw debug.unimpl("privilege change");
  1240. //adjust_stack_reg(stack_adjust);
  1241. }
  1242. else {
  1243. if is_osize_32 {
  1244. adjust_stack_reg(2 * 4 + stack_adjust);
  1245. }
  1246. else {
  1247. adjust_stack_reg(2 * 2 + stack_adjust);
  1248. }
  1249. }
  1250. //dbg_assert(*cpl === info.dpl);
  1251. update_cs_size(info.is_32());
  1252. *segment_is_null.offset(CS as isize) = false;
  1253. *segment_limits.offset(CS as isize) = info.effective_limit();
  1254. *segment_offsets.offset(CS as isize) = info.base();
  1255. *sreg.offset(CS as isize) = selector as u16;
  1256. dbg_assert!(selector & 3 == *cpl as i32);
  1257. *instruction_pointer = get_seg_cs() + eip;
  1258. //dbg_log("far return to:", LOG_CPU)
  1259. //CPU_LOG_VERBOSE && debug.dump_state("far ret end");
  1260. }
  1261. pub unsafe fn after_block_boundary() { jit_block_boundary = true; }
  1262. #[no_mangle]
  1263. pub fn track_jit_exit(phys_addr: u32) {
  1264. unsafe {
  1265. debug_last_jump = LastJump::Compiled { phys_addr };
  1266. }
  1267. }
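// The arithmetic status flags (CF, PF, AF, ZF, SF, OF) are computed lazily from the
// last arithmetic operation; get_eflags materialises them into the EFLAGS image,
// while *flags itself supplies the non-arithmetic bits.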
  1268. #[no_mangle]
  1269. pub unsafe fn get_eflags() -> i32 {
  1270. return *flags & !FLAGS_ALL
  1271. | getcf() as i32
  1272. | (getpf() as i32) << 2
  1273. | (getaf() as i32) << 4
  1274. | (getzf() as i32) << 6
  1275. | (getsf() as i32) << 7
  1276. | (getof() as i32) << 11;
  1277. }
  1278. #[no_mangle]
  1279. pub unsafe fn get_eflags_no_arith() -> i32 { return *flags; }
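// Software TLB: tlb_data holds one i32 entry per 4K virtual page. A valid entry stores
// (physical page base ^ virtual page base) in its upper 20 bits and TLB_* flag bits in
// the lower 12, so a hit recovers the physical address with
//     (entry & !0xFFF) ^ virtual_address
// which also re-applies the page offset. do_page_walk below creates these entries.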
  1280. pub unsafe fn translate_address_read(address: i32) -> OrPageFault<u32> {
  1281. let entry = *tlb_data.offset((address as u32 >> 12) as isize);
  1282. let user = *cpl == 3;
  1283. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 }) == TLB_VALID {
  1284. Ok((entry & !0xFFF ^ address) as u32)
  1285. }
  1286. else {
  1287. Ok((do_page_translation(address, false, user)? | address & 0xFFF) as u32)
  1288. }
  1289. }
  1290. pub unsafe fn translate_address_read_jit(address: i32) -> OrPageFault<u32> {
  1291. let entry = *tlb_data.offset((address as u32 >> 12) as isize);
  1292. let user = *cpl == 3;
  1293. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 }) == TLB_VALID {
  1294. Ok((entry & !0xFFF ^ address) as u32)
  1295. }
  1296. else {
  1297. match do_page_walk(address, false, user) {
  1298. Ok(phys_addr_high) => Ok((phys_addr_high | address & 0xFFF) as u32),
  1299. Err(pagefault) => {
  1300. trigger_pagefault_jit(pagefault);
  1301. Err(())
  1302. },
  1303. }
  1304. }
  1305. }
  1306. pub struct PageFault {
  1307. addr: i32,
  1308. for_writing: bool,
  1309. user: bool,
  1310. present: bool,
  1311. }
  1312. pub unsafe fn do_page_translation(addr: i32, for_writing: bool, user: bool) -> OrPageFault<i32> {
  1313. match do_page_walk(addr, for_writing, user) {
  1314. Ok(phys_addr) => Ok(phys_addr),
  1315. Err(pagefault) => {
  1316. trigger_pagefault(pagefault);
  1317. Err(())
  1318. },
  1319. }
  1320. }
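// The page walk proper: with CR0.PG clear the address is used directly; otherwise the
// two-level page tables referenced by CR3 are walked (a single 4MB page-directory
// entry is used when PSE is enabled in CR4), write/user protection is checked
// (CR0.WP clear lets supervisor code write read-only pages), accessed/dirty bits are
// set, and the result is cached in the software TLB, which is flushed when it runs
// out of entry slots.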
  1321. pub unsafe fn do_page_walk(addr: i32, for_writing: bool, user: bool) -> Result<i32, PageFault> {
  1322. let mut can_write: bool = true;
  1323. let global;
  1324. let mut allow_user: bool = true;
  1325. let page = (addr as u32 >> 12) as i32;
  1326. let high;
  1327. if *cr & CR0_PG == 0 {
  1328. // paging disabled
  1329. high = (addr as u32 & 0xFFFFF000) as i32;
  1330. global = false
  1331. }
  1332. else {
  1333. let page_dir_addr = (*cr.offset(3) as u32 >> 2).wrapping_add((page >> 10) as u32) as i32;
  1334. let page_dir_entry = read_aligned32(page_dir_addr as u32);
  1335. // XXX
  1336. let kernel_write_override = !user && 0 == *cr & CR0_WP;
  1337. if 0 == page_dir_entry & PAGE_TABLE_PRESENT_MASK {
1338. // The caller (trigger_pagefault / trigger_pagefault_jit) handles this by:
1339. //
1340. // - setting cr2 = addr (the address that caused the page fault)
1341. // - calling call_interrupt_vector with vector 14 and an error code of 0-7 (encoding write/user/present)
1342. // - preventing the faulting operation from completing
  1343. return Err(PageFault {
  1344. addr,
  1345. for_writing,
  1346. user,
  1347. present: false,
  1348. });
  1349. }
  1350. if page_dir_entry & PAGE_TABLE_RW_MASK == 0 && !kernel_write_override {
  1351. can_write = false;
  1352. if for_writing {
  1353. return Err(PageFault {
  1354. addr,
  1355. for_writing,
  1356. user,
  1357. present: true,
  1358. });
  1359. }
  1360. }
  1361. if page_dir_entry & PAGE_TABLE_USER_MASK == 0 {
  1362. allow_user = false;
  1363. if user {
  1364. // Page Fault: page table accessed by non-supervisor
  1365. return Err(PageFault {
  1366. addr,
  1367. for_writing,
  1368. user,
  1369. present: true,
  1370. });
  1371. }
  1372. }
  1373. if 0 != page_dir_entry & PAGE_TABLE_PSE_MASK && 0 != *cr.offset(4) & CR4_PSE {
  1374. // size bit is set
  1375. // set the accessed and dirty bits
  1376. let new_page_dir_entry = page_dir_entry
  1377. | PAGE_TABLE_ACCESSED_MASK
  1378. | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };
  1379. if page_dir_entry != new_page_dir_entry {
  1380. write_aligned32(page_dir_addr as u32, new_page_dir_entry);
  1381. }
  1382. high = (page_dir_entry as u32 & 0xFFC00000 | (addr & 0x3FF000) as u32) as i32;
  1383. global = page_dir_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
  1384. }
  1385. else {
  1386. let page_table_addr = ((page_dir_entry as u32 & 0xFFFFF000) >> 2)
  1387. .wrapping_add((page & 1023) as u32) as i32;
  1388. let page_table_entry = read_aligned32(page_table_addr as u32);
  1389. if page_table_entry & PAGE_TABLE_PRESENT_MASK == 0 {
  1390. return Err(PageFault {
  1391. addr,
  1392. for_writing,
  1393. user,
  1394. present: false,
  1395. });
  1396. }
  1397. if page_table_entry & PAGE_TABLE_RW_MASK == 0 && !kernel_write_override {
  1398. can_write = false;
  1399. if for_writing {
  1400. return Err(PageFault {
  1401. addr,
  1402. for_writing,
  1403. user,
  1404. present: true,
  1405. });
  1406. }
  1407. }
  1408. if page_table_entry & PAGE_TABLE_USER_MASK == 0 {
  1409. allow_user = false;
  1410. if user {
  1411. return Err(PageFault {
  1412. addr,
  1413. for_writing,
  1414. user,
  1415. present: true,
  1416. });
  1417. }
  1418. }
  1419. // Set the accessed and dirty bits
  1420. // Note: dirty bit is only set on the page table entry
  1421. let new_page_dir_entry = page_dir_entry | PAGE_TABLE_ACCESSED_MASK;
  1422. if new_page_dir_entry != page_dir_entry {
  1423. write_aligned32(page_dir_addr as u32, new_page_dir_entry);
  1424. }
  1425. let new_page_table_entry = page_table_entry
  1426. | PAGE_TABLE_ACCESSED_MASK
  1427. | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };
  1428. if page_table_entry != new_page_table_entry {
  1429. write_aligned32(page_table_addr as u32, new_page_table_entry);
  1430. }
  1431. high = (page_table_entry as u32 & 0xFFFFF000) as i32;
  1432. global = page_table_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
  1433. }
  1434. }
  1435. if *tlb_data.offset(page as isize) == 0 {
  1436. if valid_tlb_entries_count == VALID_TLB_ENTRY_MAX {
  1437. profiler::stat_increment(TLB_FULL);
  1438. clear_tlb();
  1439. // also clear global entries if tlb is almost full after clearing non-global pages
  1440. if valid_tlb_entries_count > VALID_TLB_ENTRY_MAX * 3 / 4 {
  1441. profiler::stat_increment(TLB_GLOBAL_FULL);
  1442. full_clear_tlb();
  1443. }
  1444. }
  1445. dbg_assert!(valid_tlb_entries_count < VALID_TLB_ENTRY_MAX);
  1446. valid_tlb_entries[valid_tlb_entries_count as usize] = page;
  1447. valid_tlb_entries_count += 1;
  1448. // TODO: Check that there are no duplicates in valid_tlb_entries
  1449. // XXX: There will probably be duplicates due to invlpg deleting
  1450. // entries from tlb_data but not from valid_tlb_entries
  1451. }
  1452. else if CHECK_TLB_INVARIANTS {
  1453. let mut found: bool = false;
  1454. for i in 0..valid_tlb_entries_count {
  1455. if valid_tlb_entries[i as usize] == page {
  1456. found = true;
  1457. break;
  1458. }
  1459. }
  1460. dbg_assert!(found);
  1461. }
  1462. let is_in_mapped_range = in_mapped_range(high as u32);
  1463. let has_code = !is_in_mapped_range && ::jit::jit_page_has_code(Page::page_of(high as u32));
  1464. let info_bits = TLB_VALID
  1465. | if can_write { 0 } else { TLB_READONLY }
  1466. | if allow_user { 0 } else { TLB_NO_USER }
  1467. | if is_in_mapped_range { TLB_IN_MAPPED_RANGE } else { 0 }
  1468. | if global && 0 != *cr.offset(4) & CR4_PGE { TLB_GLOBAL } else { 0 }
  1469. | if has_code { TLB_HAS_CODE } else { 0 };
  1470. dbg_assert!((high ^ page << 12) & 0xFFF == 0);
  1471. *tlb_data.offset(page as isize) = high ^ page << 12 | info_bits;
  1472. return Ok(high);
  1473. }
  1474. #[no_mangle]
  1475. pub unsafe fn full_clear_tlb() {
  1476. profiler::stat_increment(FULL_CLEAR_TLB);
  1477. // clear tlb including global pages
  1478. *last_virt_eip = -1;
  1479. for i in 0..valid_tlb_entries_count {
  1480. let page = valid_tlb_entries[i as usize];
  1481. *tlb_data.offset(page as isize) = 0;
  1482. }
  1483. valid_tlb_entries_count = 0;
  1484. if CHECK_TLB_INVARIANTS {
  1485. for i in 0..0x100000 {
  1486. dbg_assert!(*tlb_data.offset(i) == 0);
  1487. }
  1488. };
  1489. }
  1490. #[no_mangle]
  1491. pub unsafe fn clear_tlb() {
  1492. profiler::stat_increment(CLEAR_TLB);
  1493. // clear tlb excluding global pages
  1494. *last_virt_eip = -1;
  1495. let mut global_page_offset: i32 = 0;
  1496. for i in 0..valid_tlb_entries_count {
  1497. let page = valid_tlb_entries[i as usize];
  1498. let entry = *tlb_data.offset(page as isize);
  1499. if 0 != entry & TLB_GLOBAL {
  1500. // reinsert at the front
  1501. valid_tlb_entries[global_page_offset as usize] = page;
  1502. global_page_offset += 1;
  1503. }
  1504. else {
  1505. *tlb_data.offset(page as isize) = 0
  1506. }
  1507. }
  1508. valid_tlb_entries_count = global_page_offset;
  1509. if CHECK_TLB_INVARIANTS {
  1510. for i in 0..0x100000 {
  1511. dbg_assert!(*tlb_data.offset(i) == 0 || 0 != *tlb_data.offset(i) & TLB_GLOBAL);
  1512. }
  1513. };
  1514. }
  1515. /// Pagefault handling with the jit works as follows:
  1516. /// - If the slow path is taken, it calls safe_{read,write}*_jit
  1517. /// - safe_{read,write}*_jit call translate_address_{read,write}_jit
  1518. /// - translate_address_{read,write}_jit do the normal page walk and call this method instead of
  1519. /// trigger_pagefault when a page fault happens
  1520. /// - this method prepares a page fault by setting cr2, eip, prefixes and writes the error code
  1521. /// into page_fault_error_code. This method *doesn't* trigger the interrupt, as registers are
  1522. /// still stored in the wasm module
  1523. /// - back in the wasm module, the generated code detects the page fault, restores the registers
  1524. /// and finally calls trigger_pagefault_end_jit, which does the interrupt
  1525. pub unsafe fn trigger_pagefault_jit(fault: PageFault) {
  1526. let write = fault.for_writing;
  1527. let addr = fault.addr;
  1528. let present = fault.present;
  1529. let user = fault.user;
  1530. if ::config::LOG_PAGE_FAULTS {
  1531. dbg_log!(
  1532. "page fault jit w={} u={} p={} eip={:x} cr2={:x}",
  1533. write as i32,
  1534. user as i32,
  1535. present as i32,
  1536. *previous_ip,
  1537. addr
  1538. );
  1539. dbg_trace();
  1540. }
  1541. if DEBUG {
  1542. if must_not_fault {
  1543. dbg_log!("Unexpected page fault");
  1544. dbg_trace();
  1545. dbg_assert!(false);
  1546. }
  1547. }
  1548. profiler::stat_increment(PAGE_FAULT);
  1549. *cr.offset(2) = addr;
  1550. // invalidate tlb entry
  1551. let page = ((addr as u32) >> 12) as i32;
  1552. *tlb_data.offset(page as isize) = 0;
  1553. if DEBUG {
  1554. if cpu_exception_hook(CPU_EXCEPTION_PF) {
  1555. return;
  1556. }
  1557. }
  1558. *page_fault_error_code = (user as i32) << 2 | (write as i32) << 1 | present as i32;
  1559. }
  1560. #[no_mangle]
  1561. pub unsafe fn trigger_pagefault_end_jit() {
  1562. *instruction_pointer = *previous_ip;
  1563. call_interrupt_vector(CPU_EXCEPTION_PF, false, Some(*page_fault_error_code));
  1564. }
  1565. pub unsafe fn trigger_pagefault(fault: PageFault) {
  1566. let write = fault.for_writing;
  1567. let addr = fault.addr;
  1568. let present = fault.present;
  1569. let user = fault.user;
  1570. if ::config::LOG_PAGE_FAULTS {
  1571. dbg_log!(
  1572. "page fault w={} u={} p={} eip={:x} cr2={:x}",
  1573. write as i32,
  1574. user as i32,
  1575. present as i32,
  1576. *previous_ip,
  1577. addr
  1578. );
  1579. dbg_trace();
  1580. }
  1581. if DEBUG {
  1582. if must_not_fault {
  1583. dbg_log!("Unexpected page fault");
  1584. dbg_trace();
  1585. dbg_assert!(false);
  1586. }
  1587. }
  1588. profiler::stat_increment(PAGE_FAULT);
  1589. *cr.offset(2) = addr;
  1590. // invalidate tlb entry
  1591. let page = ((addr as u32) >> 12) as i32;
  1592. *tlb_data.offset(page as isize) = 0;
  1593. *instruction_pointer = *previous_ip;
  1594. call_interrupt_vector(
  1595. CPU_EXCEPTION_PF,
  1596. false,
  1597. Some((user as i32) << 2 | (write as i32) << 1 | present as i32),
  1598. );
  1599. }
  1600. pub unsafe fn translate_address_write_and_can_skip_dirty(address: i32) -> OrPageFault<(u32, bool)> {
  1601. let entry = *tlb_data.offset((address as u32 >> 12) as isize);
  1602. let user = *cpl == 3;
  1603. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1604. return Ok(((entry & !0xFFF ^ address) as u32, entry & TLB_HAS_CODE == 0));
  1605. }
  1606. else {
  1607. return Ok((
  1608. (do_page_translation(address, true, user)? | address & 0xFFF) as u32,
  1609. false,
  1610. ));
  1611. };
  1612. }
  1613. pub unsafe fn translate_address_write(address: i32) -> OrPageFault<u32> {
  1614. let entry = *tlb_data.offset((address as u32 >> 12) as isize);
  1615. let user = *cpl == 3;
  1616. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1617. return Ok((entry & !0xFFF ^ address) as u32);
  1618. }
  1619. else {
  1620. return Ok((do_page_translation(address, true, user)? | address & 0xFFF) as u32);
  1621. };
  1622. }
  1623. pub unsafe fn translate_address_write_jit(address: i32) -> OrPageFault<u32> {
  1624. let entry = *tlb_data.offset((address as u32 >> 12) as isize);
  1625. let user = *cpl == 3;
  1626. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1627. Ok((entry & !0xFFF ^ address) as u32)
  1628. }
  1629. else {
  1630. match do_page_walk(address, true, user) {
  1631. Ok(phys_addr_high) => Ok((phys_addr_high | address & 0xFFF) as u32),
  1632. Err(pagefault) => {
  1633. trigger_pagefault_jit(pagefault);
  1634. Err(())
  1635. },
  1636. }
  1637. }
  1638. }
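// Updates the TLB_HAS_CODE bit on every cached TLB entry that maps the given physical
// page, so the write paths know whether dirty-page tracking for compiled code can be
// skipped for that page.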
  1639. #[no_mangle]
  1640. pub fn tlb_set_has_code(physical_page: Page, has_code: bool) {
  1641. let physical_page = physical_page.to_u32();
  1642. for i in 0..unsafe { valid_tlb_entries_count } {
  1643. let page = unsafe { valid_tlb_entries[i as usize] };
  1644. let entry = unsafe { *tlb_data.offset(page as isize) };
  1645. if 0 != entry {
  1646. let tlb_physical_page = entry as u32 >> 12 ^ page as u32;
  1647. if physical_page == tlb_physical_page {
  1648. unsafe {
  1649. *tlb_data.offset(page as isize) =
  1650. if has_code { entry | TLB_HAS_CODE } else { entry & !TLB_HAS_CODE }
  1651. }
  1652. }
  1653. }
  1654. }
  1655. check_tlb_invariants();
  1656. }
  1657. #[no_mangle]
  1658. pub fn check_tlb_invariants() {
  1659. if !CHECK_TLB_INVARIANTS {
  1660. return;
  1661. }
  1662. for i in 0..unsafe { valid_tlb_entries_count } {
  1663. let page = unsafe { valid_tlb_entries[i as usize] };
  1664. let entry = unsafe { *tlb_data.offset(page as isize) };
  1665. if 0 == entry || 0 != entry & TLB_IN_MAPPED_RANGE {
  1666. // there's no code in mapped memory
  1667. continue;
  1668. }
  1669. let target = (entry ^ page << 12) as u32;
  1670. dbg_assert!(!in_mapped_range(target));
  1671. let entry_has_code = entry & TLB_HAS_CODE != 0;
  1672. let has_code = ::jit::jit_page_has_code(Page::page_of(target));
  1673. // If some code has been created in a page, the corresponding tlb entries must be marked
  1674. dbg_assert!(!has_code || entry_has_code);
  1675. // If a tlb entry is marked to have code, the physical page should
  1676. // contain code (the converse is not a bug, but indicates a cleanup
  1677. // problem when clearing code from a page)
  1678. dbg_assert!(!entry_has_code || has_code);
  1679. }
  1680. }
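// readable_or_pagefault / writable_or_pagefault below pre-validate an access of less
// than one page so that a subsequent multi-part read or write cannot fault halfway
// through; since size < 0x1000, at most two consecutive pages need to be checked (and
// walked if they are not yet in the TLB).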
  1681. pub unsafe fn readable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
  1682. dbg_assert!(size < 0x1000);
  1683. dbg_assert!(size > 0);
  1684. if *cr & CR0_PG == 0 {
  1685. return Ok(());
  1686. }
  1687. let user = *cpl == 3;
  1688. let mask = TLB_VALID | if user { TLB_NO_USER } else { 0 };
  1689. let expect = TLB_VALID;
  1690. let page = (addr as u32 >> 12) as i32;
  1691. if *tlb_data.offset(page as isize) & mask != expect {
  1692. do_page_translation(addr, false, user)?;
  1693. }
  1694. let next_page = ((addr + size - 1) as u32 >> 12) as i32;
  1695. if page != next_page {
  1696. dbg_assert!(next_page == page + 1);
  1697. if *tlb_data.offset(next_page as isize) & mask != expect {
  1698. do_page_translation(next_page << 12, false, user)?;
  1699. }
  1700. }
  1701. return Ok(());
  1702. }
  1703. pub unsafe fn writable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
  1704. dbg_assert!(size < 0x1000);
  1705. dbg_assert!(size > 0);
  1706. if *cr & CR0_PG == 0 {
  1707. return Ok(());
  1708. }
  1709. let user = *cpl == 3;
  1710. let mask = TLB_READONLY | TLB_VALID | if user { TLB_NO_USER } else { 0 };
  1711. let expect = TLB_VALID;
  1712. let page = (addr as u32 >> 12) as i32;
  1713. if *tlb_data.offset(page as isize) & mask != expect {
  1714. do_page_translation(addr, true, user)?;
  1715. }
  1716. let next_page = ((addr + size - 1) as u32 >> 12) as i32;
  1717. if page != next_page {
  1718. dbg_assert!(next_page == page + 1);
  1719. if *tlb_data.offset(next_page as isize) & mask != expect {
  1720. do_page_translation(next_page << 12, true, user)?;
  1721. }
  1722. }
  1723. return Ok(());
  1724. }
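// Instruction fetch caches the translation of the current code page: *eip_phys holds
// physical ^ virtual for the page recorded in *last_virt_eip, so while EIP stays on
// that page the fetch address is simply *eip_phys ^ eip with no TLB lookup.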
  1725. pub unsafe fn read_imm8() -> OrPageFault<i32> {
  1726. let eip = *instruction_pointer;
  1727. if 0 != eip & !0xFFF ^ *last_virt_eip {
  1728. *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
  1729. *last_virt_eip = eip & !0xFFF
  1730. }
  1731. dbg_assert!(!in_mapped_range((*eip_phys ^ eip) as u32));
  1732. let data8 = *mem8.offset((*eip_phys ^ eip) as isize) as i32;
  1733. *instruction_pointer = eip + 1;
  1734. return Ok(data8);
  1735. }
  1736. pub unsafe fn read_imm8s() -> OrPageFault<i32> { return Ok(read_imm8()? << 24 >> 24); }
  1737. pub unsafe fn read_imm16() -> OrPageFault<i32> {
  1738. // Two checks in one comparison:
  1739. // 1. Did the high 20 bits of eip change
  1740. // or 2. Are the low 12 bits of eip 0xFFF (and this read crosses a page boundary)
  1741. if (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFE {
  1742. return Ok(read_imm8()? | read_imm8()? << 8);
  1743. }
  1744. else {
  1745. let data16 = read16((*eip_phys ^ *instruction_pointer) as u32);
  1746. *instruction_pointer = *instruction_pointer + 2;
  1747. return Ok(data16);
  1748. };
  1749. }
  1750. pub unsafe fn read_imm32s() -> OrPageFault<i32> {
1751. // Analogous to the page-boundary check in read_imm16 above
  1752. if (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFC {
  1753. return Ok(read_imm16()? | read_imm16()? << 16);
  1754. }
  1755. else {
  1756. let data32 = read32s((*eip_phys ^ *instruction_pointer) as u32);
  1757. *instruction_pointer = *instruction_pointer + 4;
  1758. return Ok(data32);
  1759. };
  1760. }
  1761. pub unsafe fn is_osize_32() -> bool {
  1762. dbg_assert!(!in_jit);
  1763. return *is_32 != (*prefixes as i32 & PREFIX_MASK_OPSIZE == PREFIX_MASK_OPSIZE);
  1764. }
  1765. pub unsafe fn is_asize_32() -> bool {
  1766. dbg_assert!(!in_jit);
  1767. return *is_32 != (*prefixes as i32 & PREFIX_MASK_ADDRSIZE == PREFIX_MASK_ADDRSIZE);
  1768. }
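// Reads a descriptor from the GDT or LDT. The outer OrPageFault covers faults while
// reading the descriptor table itself; the inner Result distinguishes a usable
// descriptor from a null selector or one whose offset exceeds the table limit, which
// callers translate into #GP.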
  1769. pub unsafe fn lookup_segment_selector(
  1770. selector: i32,
  1771. ) -> OrPageFault<Result<(SegmentDescriptor, SegmentSelector), SelectorNullOrInvalid>> {
  1772. let selector = SegmentSelector::of_u16(selector as u16);
  1773. if selector.is_null() {
  1774. return Ok(Err(SelectorNullOrInvalid::IsNull));
  1775. }
  1776. let (table_offset, table_limit) = if selector.is_gdt() {
  1777. (*gdtr_offset as u32, *gdtr_size as u16)
  1778. }
  1779. else {
  1780. (
  1781. *segment_offsets.offset(LDTR as isize) as u32,
  1782. *segment_limits.offset(LDTR as isize) as u16,
  1783. )
  1784. };
  1785. if selector.descriptor_offset() > table_limit {
  1786. return Ok(Err(SelectorNullOrInvalid::IsInvalid));
  1787. }
  1788. let descriptor_address =
  1789. translate_address_system_read(selector.descriptor_offset() as i32 + table_offset as i32)?;
  1790. let descriptor = SegmentDescriptor::of_u64(read64s(descriptor_address) as u64);
  1791. Ok(Ok((descriptor, selector)))
  1792. }
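// Loads a data or stack segment register (CS is handled by switch_cs_real_mode,
// far_jump and far_return). For SS the descriptor must be a present, writable,
// non-system segment with RPL == DPL == CPL, otherwise #GP or #SS is raised; the
// other data segments get the usual readability and privilege checks. Returns false
// if an exception was triggered.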
  1793. pub unsafe fn switch_seg(reg: i32, selector_raw: i32) -> bool {
  1794. dbg_assert!(reg >= 0 && reg <= 5);
  1795. dbg_assert!(selector_raw >= 0 && selector_raw < 0x10000);
  1796. if !*protected_mode || vm86_mode() {
  1797. *sreg.offset(reg as isize) = selector_raw as u16;
  1798. *segment_is_null.offset(reg as isize) = false;
  1799. *segment_offsets.offset(reg as isize) = selector_raw << 4;
  1800. if reg == SS {
  1801. *stack_size_32 = false;
  1802. }
  1803. return true;
  1804. }
  1805. let (descriptor, selector) =
  1806. match return_on_pagefault!(lookup_segment_selector(selector_raw), false) {
  1807. Ok((desc, sel)) => (desc, sel),
  1808. Err(selector_unusable) => {
  1809. // The selector couldn't be used to fetch a descriptor, so we handle all of those
  1810. // cases
  1811. if selector_unusable == SelectorNullOrInvalid::IsNull {
  1812. if reg == SS {
  1813. dbg_log!("#GP for loading 0 in SS sel={:x}", selector_raw);
  1814. trigger_gp(0);
  1815. return false;
  1816. }
  1817. else if reg != CS {
  1818. // es, ds, fs, gs
  1819. *sreg.offset(reg as isize) = selector_raw as u16;
  1820. *segment_is_null.offset(reg as isize) = true;
  1821. return true;
  1822. }
  1823. }
  1824. else if selector_unusable == SelectorNullOrInvalid::IsInvalid {
  1825. dbg_log!(
  1826. "#GP for loading invalid in seg={} sel={:x}",
  1827. reg,
  1828. selector_raw
  1829. );
  1830. trigger_gp(selector_raw & !3);
  1831. return false;
  1832. }
  1833. dbg_assert!(false);
  1834. return false;
  1835. },
  1836. };
  1837. if reg == SS {
  1838. if descriptor.is_system()
  1839. || selector.rpl() != *cpl
  1840. || !descriptor.is_writable()
  1841. || descriptor.dpl() != *cpl
  1842. {
  1843. dbg_log!("#GP for loading invalid in SS sel={:x}", selector_raw);
  1844. trigger_gp(selector_raw & !3);
  1845. return false;
  1846. }
  1847. if !descriptor.is_present() {
  1848. dbg_log!("#SS for loading non-present in SS sel={:x}", selector_raw);
  1849. trigger_ss(selector_raw & !3);
  1850. return false;
  1851. }
  1852. *stack_size_32 = descriptor.is_32();
  1853. }
  1854. else if reg == CS {
  1855. // handled by switch_cs_real_mode, far_return or far_jump
  1856. dbg_assert!(false);
  1857. }
  1858. else {
  1859. if descriptor.is_system()
  1860. || !descriptor.is_readable()
  1861. || (!descriptor.is_conforming_executable()
  1862. && (selector.rpl() > descriptor.dpl() || *cpl > descriptor.dpl()))
  1863. {
  1864. dbg_log!(
  1865. "#GP for loading invalid in seg {} sel={:x}",
  1866. reg,
  1867. selector_raw,
  1868. );
  1869. trigger_gp(selector_raw & !3);
  1870. return false;
  1871. }
  1872. if !descriptor.is_present() {
  1873. dbg_log!(
  1874. "#NP for loading not-present in seg {} sel={:x}",
  1875. reg,
  1876. selector_raw,
  1877. );
  1878. trigger_np(selector_raw & !3);
  1879. return false;
  1880. }
  1881. }
  1882. *segment_is_null.offset(reg as isize) = false;
  1883. *segment_limits.offset(reg as isize) = descriptor.effective_limit();
  1884. *segment_offsets.offset(reg as isize) = descriptor.base();
  1885. *sreg.offset(reg as isize) = selector_raw as u16;
  1886. true
  1887. }
  1888. #[no_mangle]
  1889. pub unsafe fn log_segment_null(segment: i32) {
  1890. dbg_assert!(segment >= 0 && segment < 8);
  1891. if *segment_is_null.offset(segment as isize) {
  1892. dbg_assert!(segment != CS && segment != SS);
  1893. dbg_log!("#gp: Access null segment in jit");
  1894. }
  1895. }
  1896. pub unsafe fn get_seg(segment: i32) -> OrPageFault<i32> {
  1897. dbg_assert!(segment >= 0 && segment < 8);
  1898. if *segment_is_null.offset(segment as isize) {
  1899. dbg_assert!(segment != CS && segment != SS);
  1900. dbg_log!("#gp: Access null segment");
  1901. dbg_trace();
  1902. dbg_assert!(!in_jit, "TODO");
  1903. trigger_gp(0);
  1904. return Err(());
  1905. }
  1906. return Ok(*segment_offsets.offset(segment as isize));
  1907. }
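// Toggling CR0.PG or CR0.WP flushes the whole TLB (including global entries) because
// cached entries bake in the write-protection behaviour that was in effect when they
// were created.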
  1908. pub unsafe fn set_cr0(cr0: i32) {
  1909. let old_cr0 = *cr;
  1910. if old_cr0 & CR0_AM == 0 && cr0 & CR0_AM != 0 {
  1911. dbg_log!("Warning: Unimplemented: cr0 alignment mask");
  1912. }
  1913. if (cr0 & (CR0_PE | CR0_PG)) == CR0_PG {
  1914. panic!("cannot load PG without PE");
  1915. }
  1916. *cr = cr0;
  1917. *cr |= CR0_ET;
  1918. if old_cr0 & (CR0_PG | CR0_WP) != cr0 & (CR0_PG | CR0_WP) {
  1919. full_clear_tlb();
  1920. }
  1921. *protected_mode = (*cr & CR0_PE) == CR0_PE;
  1922. }
  1923. pub unsafe fn cpl_changed() { *last_virt_eip = -1; }
  1924. pub unsafe fn update_cs_size(new_size: bool) {
  1925. if *is_32 != new_size {
  1926. *is_32 = new_size;
  1927. }
  1928. }
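// I/O privilege check: in protected mode, when CPL > IOPL or in vm86 mode, a port
// access is only allowed if every bit covering [port, port+size) is clear in the TSS
// I/O permission bitmap (whose offset is read from the 16-bit field at TSS+0x66);
// otherwise #GP is raised.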
  1929. pub unsafe fn test_privileges_for_io(port: i32, size: i32) -> bool {
  1930. if *protected_mode && (*cpl > getiopl() as u8 || (*flags & FLAG_VM != 0)) {
  1931. if !*tss_size_32 {
  1932. dbg_log!("#GP for port io, 16-bit TSS port={:x} size={}", port, size);
  1933. trigger_gp(0);
  1934. return false;
  1935. }
  1936. let tsr_size = *segment_limits.offset(TR as isize);
  1937. let tsr_offset = *segment_offsets.offset(TR as isize);
  1938. if tsr_size >= 0x67 {
  1939. dbg_assert!(tsr_offset + 0x64 + 2 & 0xFFF < 0xFFF);
  1940. let iomap_base = read16(return_on_pagefault!(
  1941. translate_address_system_read(tsr_offset + 0x64 + 2),
  1942. false
  1943. ));
  1944. let high_port = port + size - 1;
  1945. if tsr_size >= (iomap_base + (high_port >> 3)) as u32 {
  1946. let mask = ((1 << size) - 1) << (port & 7);
  1947. let addr = return_on_pagefault!(
  1948. translate_address_system_read(tsr_offset + iomap_base + (port >> 3)),
  1949. false
  1950. );
  1951. let port_info = if mask & 0xFF00 != 0 { read16(addr) } else { read8(addr) };
  1952. dbg_assert!(addr & 0xFFF < 0xFFF);
  1953. if port_info & mask == 0 {
  1954. return true;
  1955. }
  1956. }
  1957. }
  1958. dbg_log!("#GP for port io port={:x} size={}", port, size);
  1959. trigger_gp(0);
  1960. return false;
  1961. }
  1962. return true;
  1963. }
  1964. pub unsafe fn popa16() {
  1965. return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 16));
  1966. write_reg16(DI, pop16().unwrap());
  1967. write_reg16(SI, pop16().unwrap());
  1968. write_reg16(BP, pop16().unwrap());
  1969. adjust_stack_reg(2);
  1970. write_reg16(BX, pop16().unwrap());
  1971. write_reg16(DX, pop16().unwrap());
  1972. write_reg16(CX, pop16().unwrap());
  1973. write_reg16(AX, pop16().unwrap());
  1974. }
  1975. pub unsafe fn popa32() {
  1976. return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 32));
  1977. write_reg32(EDI, pop32s().unwrap());
  1978. write_reg32(ESI, pop32s().unwrap());
  1979. write_reg32(EBP, pop32s().unwrap());
  1980. adjust_stack_reg(4);
  1981. write_reg32(EBX, pop32s().unwrap());
  1982. write_reg32(EDX, pop32s().unwrap());
  1983. write_reg32(ECX, pop32s().unwrap());
  1984. write_reg32(EAX, pop32s().unwrap());
  1985. }
  1986. #[no_mangle]
  1987. pub fn get_seg_cs() -> i32 { unsafe { *segment_offsets.offset(CS as isize) } }
  1988. #[no_mangle]
  1989. pub unsafe fn get_seg_ss() -> i32 { return *segment_offsets.offset(SS as isize); }
  1990. pub unsafe fn get_seg_prefix(default_segment: i32) -> OrPageFault<i32> {
  1991. dbg_assert!(!in_jit);
  1992. let prefix = *prefixes as i32 & PREFIX_MASK_SEGMENT;
  1993. if 0 != prefix {
  1994. if prefix == SEG_PREFIX_ZERO {
  1995. return Ok(0);
  1996. }
  1997. else {
  1998. return get_seg(prefix - 1);
  1999. }
  2000. }
  2001. else {
  2002. return get_seg(default_segment);
  2003. };
  2004. }
  2005. pub unsafe fn get_seg_prefix_ds(offset: i32) -> OrPageFault<i32> {
  2006. Ok(get_seg_prefix(DS)? + offset)
  2007. }
  2008. pub unsafe fn get_seg_prefix_ss(offset: i32) -> OrPageFault<i32> {
  2009. Ok(get_seg_prefix(SS)? + offset)
  2010. }
  2011. pub unsafe fn modrm_resolve(modrm_byte: i32) -> OrPageFault<i32> {
  2012. if is_asize_32() { resolve_modrm32(modrm_byte) } else { resolve_modrm16(modrm_byte) }
  2013. }
  2014. pub unsafe fn run_instruction(opcode: i32) { ::gen::interpreter::run(opcode as u32) }
  2015. pub unsafe fn run_instruction0f_16(opcode: i32) { ::gen::interpreter0f::run(opcode as u32) }
  2016. pub unsafe fn run_instruction0f_32(opcode: i32) { ::gen::interpreter0f::run(opcode as u32 | 0x100) }
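// Executes one unit of work: look up compiled code for the current physical EIP and
// state flags and run it, or fall back to the interpreter (jit_run_interpreted),
// which also feeds the hotness counter that eventually triggers compilation. With
// FORCE_DISABLE_JIT a single instruction is interpreted instead.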
  2017. #[no_mangle]
  2018. pub unsafe fn cycle_internal() {
  2019. profiler::stat_increment(CYCLE_INTERNAL);
  2020. if !::config::FORCE_DISABLE_JIT {
  2021. *previous_ip = *instruction_pointer;
  2022. let phys_addr = return_on_pagefault!(get_phys_eip()) as u32;
  2023. let state_flags = pack_current_state_flags();
  2024. let entry = ::jit::jit_find_cache_entry(phys_addr, state_flags);
  2025. if entry != ::jit::cached_code::NONE {
  2026. profiler::stat_increment(RUN_FROM_CACHE);
  2027. let initial_tsc = *timestamp_counter;
  2028. let wasm_table_index = entry.wasm_table_index;
  2029. let initial_state = entry.initial_state;
  2030. #[cfg(debug_assertions)]
  2031. {
  2032. in_jit = true;
  2033. }
  2034. call_indirect1(
  2035. (wasm_table_index as u32).wrapping_add(WASM_TABLE_OFFSET as u32) as i32,
  2036. initial_state,
  2037. );
  2038. #[cfg(debug_assertions)]
  2039. {
  2040. in_jit = false;
  2041. }
  2042. profiler::stat_increment_by(
  2043. RUN_FROM_CACHE_STEPS,
  2044. (*timestamp_counter - initial_tsc) as u64,
  2045. );
  2046. dbg_assert!(*timestamp_counter != initial_tsc, "TSC didn't change");
  2047. if cfg!(feature = "profiler") && cfg!(feature = "profiler_instrument") {
  2048. dbg_assert!(match ::cpu::cpu::debug_last_jump {
  2049. LastJump::Compiled { .. } => true,
  2050. _ => false,
  2051. });
  2052. let last_jump_addr = ::cpu::cpu::debug_last_jump.phys_address().unwrap();
  2053. let last_jump_opcode = if last_jump_addr != 0 {
  2054. read32s(last_jump_addr)
  2055. }
  2056. else {
  2057. // Happens during exit due to loop iteration limit
  2058. 0
  2059. };
  2060. ::opstats::record_opstat_jit_exit(last_jump_opcode as u32);
  2061. }
  2062. if Page::page_of(*previous_ip as u32) == Page::page_of(*instruction_pointer as u32) {
  2063. profiler::stat_increment(RUN_FROM_CACHE_EXIT_SAME_PAGE);
  2064. }
  2065. else {
  2066. profiler::stat_increment(RUN_FROM_CACHE_EXIT_DIFFERENT_PAGE);
  2067. }
  2068. }
  2069. else {
  2070. ::jit::record_entry_point(phys_addr);
  2071. #[cfg(feature = "profiler")]
  2072. {
  2073. if CHECK_MISSED_ENTRY_POINTS {
  2074. ::jit::check_missed_entry_points(phys_addr, state_flags);
  2075. }
  2076. }
  2077. if DEBUG {
  2078. dbg_assert!(!must_not_fault);
  2079. must_not_fault = true
  2080. }
  2081. if DEBUG {
  2082. dbg_assert!(must_not_fault);
  2083. must_not_fault = false
  2084. }
  2085. let initial_tsc = *timestamp_counter;
  2086. jit_run_interpreted(phys_addr as i32);
  2087. ::jit::jit_increase_hotness_and_maybe_compile(
  2088. phys_addr,
  2089. get_seg_cs() as u32,
  2090. state_flags,
  2091. *timestamp_counter - initial_tsc,
  2092. );
  2093. profiler::stat_increment_by(
  2094. RUN_INTERPRETED_STEPS,
  2095. (*timestamp_counter - initial_tsc) as u64,
  2096. );
  2097. dbg_assert!(*timestamp_counter != initial_tsc, "TSC didn't change");
  2098. };
  2099. }
  2100. else {
  2101. *previous_ip = *instruction_pointer;
  2102. let opcode = return_on_pagefault!(read_imm8());
  2103. *timestamp_counter += 1;
  2104. dbg_assert!(*prefixes == 0);
  2105. run_instruction(opcode | (*is_32 as i32) << 8);
  2106. dbg_assert!(*prefixes == 0);
  2107. }
  2108. }
  2109. pub unsafe fn get_phys_eip() -> OrPageFault<u32> {
  2110. let eip = *instruction_pointer;
  2111. if 0 != eip & !0xFFF ^ *last_virt_eip {
  2112. *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
  2113. *last_virt_eip = eip & !0xFFF
  2114. }
  2115. let phys_addr = (*eip_phys ^ eip) as u32;
  2116. dbg_assert!(!in_mapped_range(phys_addr));
  2117. return Ok(phys_addr);
  2118. }
  2119. unsafe fn jit_run_interpreted(phys_addr: i32) {
  2120. profiler::stat_increment(RUN_INTERPRETED);
  2121. dbg_assert!(!in_mapped_range(phys_addr as u32));
  2122. if cfg!(debug_assertions) {
  2123. debug_last_jump = LastJump::Interpreted {
  2124. phys_addr: phys_addr as u32,
  2125. };
  2126. }
  2127. jit_block_boundary = false;
  2128. let opcode = *mem8.offset(phys_addr as isize) as i32;
  2129. *instruction_pointer += 1;
  2130. *timestamp_counter += 1;
  2131. dbg_assert!(*prefixes == 0);
  2132. run_instruction(opcode | (*is_32 as i32) << 8);
  2133. dbg_assert!(*prefixes == 0);
  2134. // We need to limit the number of iterations here as jumps within the same page are not counted
  2135. // as block boundaries for the interpreter (as they don't create an entry point and don't need
  2136. // a check if the jump target may have compiled code)
  2137. let mut i = 0;
  2138. while !jit_block_boundary
  2139. && Page::page_of(*previous_ip as u32) == Page::page_of(*instruction_pointer as u32)
  2140. && i < INTERPRETER_ITERATION_LIMIT
  2141. {
  2142. *previous_ip = *instruction_pointer;
  2143. let opcode = return_on_pagefault!(read_imm8());
  2144. if CHECK_MISSED_ENTRY_POINTS {
  2145. let phys_addr = return_on_pagefault!(get_phys_eip()) as u32;
  2146. let state_flags = pack_current_state_flags();
  2147. let entry = ::jit::jit_find_cache_entry(phys_addr, state_flags);
  2148. if entry != ::jit::cached_code::NONE {
  2149. profiler::stat_increment(RUN_INTERPRETED_MISSED_COMPILED_ENTRY_RUN_INTERPRETED);
  2150. //dbg_log!(
  2151. // "missed entry point at {:x} prev_opcode={:x} opcode={:x}",
  2152. // phys_addr,
  2153. // prev_opcode,
  2154. // opcode
  2155. //);
  2156. }
  2157. }
  2158. if cfg!(debug_assertions) {
  2159. debug_last_jump = LastJump::Interpreted {
  2160. phys_addr: phys_addr as u32,
  2161. };
  2162. }
  2163. *timestamp_counter += 1;
  2164. //if DEBUG {
  2165. // logop(*previous_ip, opcode_0);
  2166. //}
  2167. dbg_assert!(*prefixes == 0);
  2168. run_instruction(opcode | (*is_32 as i32) << 8);
  2169. dbg_assert!(*prefixes == 0);
  2170. i += 1;
  2171. }
  2172. }
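// State flags that, together with the physical address, key the compiled-code cache:
// bit 0 = CS is 32-bit, bit 1 = SS is 32-bit, bit 2 = CPL is 3, bit 3 = flat
// segmentation. Compiled blocks are only reused when all of these match.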
  2173. pub fn pack_current_state_flags() -> CachedStateFlags {
  2174. unsafe {
  2175. CachedStateFlags::of_u32(
  2176. (*is_32 as u32) << 0
  2177. | (*stack_size_32 as u32) << 1
  2178. | ((*cpl == 3) as u32) << 2
  2179. | (has_flat_segmentation() as u32) << 3,
  2180. )
  2181. }
  2182. }
  2183. #[no_mangle]
  2184. pub unsafe fn has_flat_segmentation() -> bool {
  2185. // ss can't be null
  2186. return *segment_offsets.offset(SS as isize) == 0
  2187. && !*segment_is_null.offset(DS as isize)
  2188. && *segment_offsets.offset(DS as isize) == 0;
  2189. }
  2190. pub unsafe fn run_prefix_instruction() {
  2191. run_instruction(return_on_pagefault!(read_imm8()) | (is_osize_32() as i32) << 8);
  2192. }
  2193. pub unsafe fn segment_prefix_op(seg: i32) {
  2194. dbg_assert!(seg <= 5);
  2195. *prefixes = (*prefixes as i32 | seg + 1) as u8;
  2196. run_prefix_instruction();
  2197. *prefixes = 0
  2198. }
  2199. #[no_mangle]
  2200. pub unsafe fn do_many_cycles_native() {
  2201. profiler::stat_increment(DO_MANY_CYCLES);
  2202. let initial_timestamp_counter = *timestamp_counter;
  2203. while (*timestamp_counter).wrapping_sub(initial_timestamp_counter) < LOOP_COUNTER as u32
  2204. && !*in_hlt
  2205. {
  2206. cycle_internal();
  2207. }
  2208. }
  2209. #[no_mangle]
  2210. pub unsafe fn trigger_de() {
  2211. dbg_log!("#de");
  2212. *instruction_pointer = *previous_ip;
  2213. if DEBUG {
  2214. if cpu_exception_hook(CPU_EXCEPTION_DE) {
  2215. return;
  2216. }
  2217. }
  2218. call_interrupt_vector(CPU_EXCEPTION_DE, false, None);
  2219. }
  2220. #[no_mangle]
  2221. pub unsafe fn trigger_ud() {
  2222. dbg_log!("#ud");
  2223. dbg_trace();
  2224. *instruction_pointer = *previous_ip;
  2225. if DEBUG {
  2226. if cpu_exception_hook(CPU_EXCEPTION_UD) {
  2227. return;
  2228. }
  2229. }
  2230. call_interrupt_vector(CPU_EXCEPTION_UD, false, None);
  2231. }
  2232. pub unsafe fn trigger_nm() {
  2233. dbg_log!("#nm eip={:x}", *previous_ip);
  2234. dbg_trace();
  2235. *instruction_pointer = *previous_ip;
  2236. if DEBUG {
  2237. if cpu_exception_hook(CPU_EXCEPTION_NM) {
  2238. return;
  2239. }
  2240. }
  2241. call_interrupt_vector(CPU_EXCEPTION_NM, false, None);
  2242. }
  2243. #[no_mangle]
  2244. pub unsafe fn trigger_gp(code: i32) {
  2245. dbg_log!("#gp");
  2246. *instruction_pointer = *previous_ip;
  2247. if DEBUG {
  2248. if cpu_exception_hook(CPU_EXCEPTION_GP) {
  2249. return;
  2250. }
  2251. }
  2252. call_interrupt_vector(CPU_EXCEPTION_GP, false, Some(code));
  2253. }
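// The virt_boundary_* helpers below handle accesses whose bytes straddle a 4K page
// boundary: they take the already-translated physical addresses of the first and
// last byte and split the access into byte/word pieces on either side.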
  2254. pub unsafe fn virt_boundary_read16(low: u32, high: u32) -> i32 {
  2255. dbg_assert!(low & 0xFFF == 0xFFF);
  2256. dbg_assert!(high & 0xFFF == 0);
  2257. return read8(low as u32) | read8(high as u32) << 8;
  2258. }
  2259. pub unsafe fn virt_boundary_read32s(low: u32, high: u32) -> i32 {
  2260. dbg_assert!(low & 0xFFF >= 0xFFD);
  2261. dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
  2262. let mid;
  2263. if 0 != low & 1 {
  2264. if 0 != low & 2 {
  2265. // 0xFFF
  2266. mid = read16(high - 2)
  2267. }
  2268. else {
  2269. // 0xFFD
  2270. mid = read16(low + 1)
  2271. }
  2272. }
  2273. else {
  2274. // 0xFFE
  2275. mid = virt_boundary_read16(low + 1, high - 1)
  2276. }
  2277. return read8(low as u32) | mid << 8 | read8(high as u32) << 24;
  2278. }
  2279. pub unsafe fn virt_boundary_write16(low: u32, high: u32, value: i32) {
  2280. dbg_assert!(low & 0xFFF == 0xFFF);
  2281. dbg_assert!(high & 0xFFF == 0);
  2282. write8(low as u32, value);
  2283. write8(high as u32, value >> 8);
  2284. }
  2285. pub unsafe fn virt_boundary_write32(low: u32, high: u32, value: i32) {
  2286. dbg_assert!(low & 0xFFF >= 0xFFD);
  2287. dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
  2288. write8(low as u32, value);
  2289. if 0 != low & 1 {
  2290. if 0 != low & 2 {
  2291. // 0xFFF
  2292. write8((high - 2) as u32, value >> 8);
  2293. write8((high - 1) as u32, value >> 16);
  2294. }
  2295. else {
  2296. // 0xFFD
  2297. write8((low + 1) as u32, value >> 8);
  2298. write8((low + 2) as u32, value >> 16);
  2299. }
  2300. }
  2301. else {
  2302. // 0xFFE
  2303. write8((low + 1) as u32, value >> 8);
  2304. write8((high - 1) as u32, value >> 16);
  2305. }
  2306. write8(high as u32, value >> 24);
  2307. }
  2308. pub unsafe fn safe_read8(addr: i32) -> OrPageFault<i32> { Ok(read8(translate_address_read(addr)?)) }
  2309. pub unsafe fn safe_read16(addr: i32) -> OrPageFault<i32> {
  2310. if addr & 0xFFF == 0xFFF {
  2311. Ok(safe_read8(addr)? | safe_read8(addr + 1)? << 8)
  2312. }
  2313. else {
  2314. Ok(read16(translate_address_read(addr)?))
  2315. }
  2316. }
  2317. pub unsafe fn safe_read32s(addr: i32) -> OrPageFault<i32> {
  2318. if addr & 0xFFF >= 0xFFD {
  2319. Ok(safe_read16(addr)? | safe_read16(addr + 2)? << 16)
  2320. }
  2321. else {
  2322. Ok(read32s(translate_address_read(addr)?))
  2323. }
  2324. }
  2325. pub unsafe fn safe_read64s(addr: i32) -> OrPageFault<u64> {
  2326. if addr & 0xFFF > 0x1000 - 8 {
  2327. Ok(safe_read32s(addr)? as u32 as u64 | (safe_read32s(addr + 4)? as u32 as u64) << 32)
  2328. }
  2329. else {
  2330. Ok(read64s(translate_address_read(addr)?) as u64)
  2331. }
  2332. }
  2333. pub unsafe fn safe_read128s(addr: i32) -> OrPageFault<reg128> {
  2334. if addr & 0xFFF > 0x1000 - 16 {
  2335. Ok(reg128 {
  2336. u64_0: [safe_read64s(addr)?, safe_read64s(addr + 8)?],
  2337. })
  2338. }
  2339. else {
  2340. Ok(read128(translate_address_read(addr)?))
  2341. }
  2342. }
  2343. #[no_mangle]
  2344. #[cfg(feature = "profiler")]
  2345. pub fn report_safe_read_jit_slow(address: u32, entry: i32) {
  2346. if entry & TLB_VALID == 0 {
  2347. profiler::stat_increment(SAFE_READ_SLOW_NOT_VALID);
  2348. }
  2349. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2350. profiler::stat_increment(SAFE_READ_SLOW_IN_MAPPED_RANGE);
  2351. }
  2352. else if entry & TLB_NO_USER != 0 {
  2353. profiler::stat_increment(SAFE_READ_SLOW_NOT_USER);
  2354. }
  2355. else if address & 0xFFF > 0x1000 - 16 {
  2356. profiler::stat_increment(SAFE_READ_SLOW_PAGE_CROSSED);
  2357. }
  2358. else {
  2359. dbg_log!("Unexpected entry bit: {:x} (read at {:x})", entry, address);
  2360. dbg_assert!(false);
  2361. }
  2362. }
  2363. #[no_mangle]
  2364. #[cfg(feature = "profiler")]
  2365. pub fn report_safe_write_jit_slow(address: u32, entry: i32) {
  2366. if entry & TLB_VALID == 0 {
  2367. profiler::stat_increment(SAFE_WRITE_SLOW_NOT_VALID);
  2368. }
  2369. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2370. profiler::stat_increment(SAFE_WRITE_SLOW_IN_MAPPED_RANGE);
  2371. }
  2372. else if entry & TLB_HAS_CODE != 0 {
  2373. profiler::stat_increment(SAFE_WRITE_SLOW_HAS_CODE);
  2374. }
  2375. else if entry & TLB_READONLY != 0 {
  2376. profiler::stat_increment(SAFE_WRITE_SLOW_READ_ONLY);
  2377. }
  2378. else if entry & TLB_NO_USER != 0 {
  2379. profiler::stat_increment(SAFE_WRITE_SLOW_NOT_USER);
  2380. }
  2381. else if address & 0xFFF > 0x1000 - 16 {
  2382. profiler::stat_increment(SAFE_WRITE_SLOW_PAGE_CROSSED);
  2383. }
  2384. else {
  2385. dbg_assert!(false);
  2386. }
  2387. }
  2388. #[no_mangle]
  2389. #[cfg(feature = "profiler")]
  2390. pub fn report_safe_read_write_jit_slow(address: u32, entry: i32) {
  2391. if entry & TLB_VALID == 0 {
  2392. profiler::stat_increment(SAFE_READ_WRITE_SLOW_NOT_VALID);
  2393. }
  2394. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2395. profiler::stat_increment(SAFE_READ_WRITE_SLOW_IN_MAPPED_RANGE);
  2396. }
  2397. else if entry & TLB_HAS_CODE != 0 {
  2398. profiler::stat_increment(SAFE_READ_WRITE_SLOW_HAS_CODE);
  2399. }
  2400. else if entry & TLB_READONLY != 0 {
  2401. profiler::stat_increment(SAFE_READ_WRITE_SLOW_READ_ONLY);
  2402. }
  2403. else if entry & TLB_NO_USER != 0 {
  2404. profiler::stat_increment(SAFE_READ_WRITE_SLOW_NOT_USER);
  2405. }
  2406. else if address & 0xFFF > 0x1000 - 16 {
  2407. profiler::stat_increment(SAFE_READ_WRITE_SLOW_PAGE_CROSSED);
  2408. }
  2409. else {
  2410. dbg_assert!(false);
  2411. }
  2412. }
  2413. #[repr(align(0x1000))]
  2414. struct ScratchBuffer([u8; 0x1000 * 2]);
  2415. static mut jit_paging_scratch_buffer: ScratchBuffer = ScratchBuffer([0; 2 * 0x1000]);
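// Slow path for memory reads generated by the jit. On success it returns a value x
// such that mem8 + (x ^ addr) points at the bytes to read: either the real physical
// location, or (for page-crossing and mmio reads) a copy placed in the page-aligned
// scratch buffer above, presumably so the generated fast-path load can proceed
// unchanged. A return value of 1 signals a page fault; the wasm code then restores
// its registers and calls trigger_pagefault_end_jit (see the comment on
// trigger_pagefault_jit).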
  2416. pub unsafe fn safe_read_slow_jit(addr: i32, bitsize: i32, start_eip: i32, is_write: bool) -> i32 {
  2417. let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
  2418. let addr_low = match if is_write {
  2419. translate_address_write_jit(addr)
  2420. }
  2421. else {
  2422. translate_address_read_jit(addr)
  2423. } {
  2424. Err(()) => {
  2425. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2426. return 1;
  2427. },
  2428. Ok(addr) => addr,
  2429. };
  2430. if crosses_page {
  2431. let boundary_addr = (addr | 0xFFF) + 1;
  2432. let addr_high = match if is_write {
  2433. translate_address_write_jit(boundary_addr)
  2434. }
  2435. else {
  2436. translate_address_read_jit(boundary_addr)
  2437. } {
  2438. Err(()) => {
  2439. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2440. return 1;
  2441. },
  2442. Ok(addr) => addr,
  2443. };
  2444. // TODO: Could check if virtual pages point to consecutive physical and go to fast path
  2445. // do read, write into scratch buffer
  2446. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2447. dbg_assert!(scratch & 0xFFF == 0);
  2448. for s in addr_low..((addr_low | 0xFFF) + 1) {
  2449. *(scratch as *mut u8).offset((s & 0xFFF) as isize) = read8(s) as u8
  2450. }
  2451. for s in addr_high..(addr_high + (addr + bitsize / 8 & 0xFFF) as u32) {
  2452. *(scratch as *mut u8).offset((0x1000 | s & 0xFFF) as isize) = read8(s) as u8
  2453. }
  2454. (((scratch - mem8 as u32) as i32) ^ addr) & !0xFFF
  2455. }
  2456. else if in_mapped_range(addr_low) {
  2457. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2458. dbg_assert!(scratch & 0xFFF == 0);
  2459. for s in addr_low..(addr_low + bitsize as u32 / 8) {
  2460. *(scratch as *mut u8).offset((s & 0xFFF) as isize) = read8(s) as u8
  2461. }
  2462. (((scratch - mem8 as u32) as i32) ^ addr) & !0xFFF
  2463. }
  2464. else {
  2465. (addr_low as i32 ^ addr) & !0xFFF
  2466. }
  2467. }
  2468. #[no_mangle]
  2469. pub unsafe fn safe_read8_slow_jit(addr: i32, eip: i32) -> i32 {
  2470. safe_read_slow_jit(addr, 8, eip, false)
  2471. }
  2472. #[no_mangle]
  2473. pub unsafe fn safe_read16_slow_jit(addr: i32, eip: i32) -> i32 {
  2474. safe_read_slow_jit(addr, 16, eip, false)
  2475. }
  2476. #[no_mangle]
  2477. pub unsafe fn safe_read32s_slow_jit(addr: i32, eip: i32) -> i32 {
  2478. safe_read_slow_jit(addr, 32, eip, false)
  2479. }
  2480. #[no_mangle]
  2481. pub unsafe fn safe_read64s_slow_jit(addr: i32, eip: i32) -> i32 {
  2482. safe_read_slow_jit(addr, 64, eip, false)
  2483. }
  2484. #[no_mangle]
  2485. pub unsafe fn safe_read128s_slow_jit(addr: i32, eip: i32) -> i32 {
  2486. safe_read_slow_jit(addr, 128, eip, false)
  2487. }
  2488. #[no_mangle]
  2489. pub unsafe fn safe_read_write8_slow_jit(addr: i32, eip: i32) -> i32 {
  2490. safe_read_slow_jit(addr, 8, eip, true)
  2491. }
  2492. #[no_mangle]
  2493. pub unsafe fn safe_read_write16_slow_jit(addr: i32, eip: i32) -> i32 {
  2494. safe_read_slow_jit(addr, 16, eip, true)
  2495. }
  2496. #[no_mangle]
  2497. pub unsafe fn safe_read_write32s_slow_jit(addr: i32, eip: i32) -> i32 {
  2498. safe_read_slow_jit(addr, 32, eip, true)
  2499. }
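// Slow path for memory writes generated by the jit. Page-crossing and mmio writes are
// performed here (via the safe_write*/virt_boundary_*/mmap_write* helpers) and the
// returned offset points into the scratch buffer, so the fast-path store that follows
// in the generated code is harmless; ordinary writes return the real physical offset
// after marking the target page dirty for the jit.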
  2500. pub unsafe fn safe_write_slow_jit(
  2501. addr: i32,
  2502. bitsize: i32,
  2503. value_low: u64,
  2504. value_high: u64,
  2505. start_eip: i32,
  2506. ) -> i32 {
  2507. if Page::page_of(*instruction_pointer as u32) == Page::page_of(addr as u32) {
  2508. // XXX: Check based on virtual address
  2509. dbg_log!(
  2510. "SMC: bits={} eip={:x} writeaddr={:x}",
  2511. bitsize,
  2512. start_eip as u32,
  2513. addr as u32
  2514. );
  2515. }
  2516. let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
  2517. let addr_low = match translate_address_write_jit(addr) {
  2518. Err(()) => {
  2519. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2520. return 1;
  2521. },
  2522. Ok(addr) => addr,
  2523. };
  2524. if crosses_page {
  2525. let addr_high = match translate_address_write_jit((addr | 0xFFF) + 1) {
  2526. Err(()) => {
  2527. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2528. *page_fault = true;
  2529. return 1;
  2530. },
  2531. Ok(addr) => addr,
  2532. };
  2533. // TODO: Could check if virtual pages point to consecutive physical and go to fast path
  2534. // do write, return dummy pointer for fast path to write into
  2535. match bitsize {
  2536. 128 => safe_write128(
  2537. addr,
  2538. reg128 {
  2539. u64_0: [value_low, value_high],
  2540. },
  2541. )
  2542. .unwrap(),
  2543. 64 => safe_write64(addr, value_low).unwrap(),
  2544. 32 => virt_boundary_write32(
  2545. addr_low,
  2546. addr_high | (addr as u32 + 3 & 3),
  2547. value_low as i32,
  2548. ),
  2549. 16 => virt_boundary_write16(addr_low, addr_high, value_low as i32),
  2550. 8 => dbg_assert!(false),
  2551. _ => dbg_assert!(false),
  2552. }
  2553. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2554. dbg_assert!(scratch & 0xFFF == 0);
  2555. ((scratch as i32 - mem8 as i32) ^ addr) & !0xFFF
  2556. }
  2557. else if in_mapped_range(addr_low) {
  2558. match bitsize {
  2559. 128 => memory::mmap_write128(
  2560. addr_low,
  2561. value_low as i32,
  2562. (value_low >> 32) as i32,
  2563. value_high as i32,
  2564. (value_high >> 32) as i32,
  2565. ),
  2566. 64 => memory::mmap_write64(addr_low, value_low as i32, (value_low >> 32) as i32),
  2567. 32 => memory::mmap_write32(addr_low, value_low as i32),
  2568. 16 => memory::mmap_write16(addr_low, value_low as i32),
  2569. 8 => memory::mmap_write8(addr_low, value_low as i32),
  2570. _ => dbg_assert!(false),
  2571. }
  2572. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2573. dbg_assert!(scratch & 0xFFF == 0);
  2574. ((scratch as i32 - mem8 as i32) ^ addr) & !0xFFF
  2575. }
  2576. else {
  2577. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(addr_low));
  2578. (addr_low as i32 ^ addr) & !0xFFF
  2579. }
  2580. }
  2581. #[no_mangle]
  2582. pub unsafe fn safe_write8_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2583. safe_write_slow_jit(addr, 8, value as u64, 0, start_eip)
  2584. }
  2585. #[no_mangle]
  2586. pub unsafe fn safe_write16_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2587. safe_write_slow_jit(addr, 16, value as u64, 0, start_eip)
  2588. }
  2589. #[no_mangle]
  2590. pub unsafe fn safe_write32_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2591. safe_write_slow_jit(addr, 32, value as u64, 0, start_eip)
  2592. }
  2593. #[no_mangle]
  2594. pub unsafe fn safe_write64_slow_jit(addr: i32, value: u64, start_eip: i32) -> i32 {
  2595. safe_write_slow_jit(addr, 64, value, 0, start_eip)
  2596. }
  2597. #[no_mangle]
  2598. pub unsafe fn safe_write128_slow_jit(addr: i32, low: u64, high: u64, start_eip: i32) -> i32 {
  2599. safe_write_slow_jit(addr, 128, low, high, start_eip)
  2600. }
  2601. pub unsafe fn safe_write8(addr: i32, value: i32) -> OrPageFault<()> {
  2602. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2603. if in_mapped_range(phys_addr) {
  2604. memory::mmap_write8(phys_addr, value);
  2605. }
  2606. else {
  2607. if !can_skip_dirty_page {
  2608. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(phys_addr));
  2609. }
  2610. else {
  2611. dbg_assert!(!::jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2612. }
  2613. memory::write8_no_mmap_or_dirty_check(phys_addr, value);
  2614. };
  2615. Ok(())
  2616. }
  2617. pub unsafe fn safe_write16(addr: i32, value: i32) -> OrPageFault<()> {
  2618. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2619. if addr & 0xFFF == 0xFFF {
  2620. virt_boundary_write16(phys_addr, translate_address_write(addr + 1)?, value);
  2621. }
  2622. else if in_mapped_range(phys_addr) {
  2623. memory::mmap_write16(phys_addr, value);
  2624. }
  2625. else {
  2626. if !can_skip_dirty_page {
  2627. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(phys_addr));
  2628. }
  2629. else {
  2630. dbg_assert!(!::jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2631. }
  2632. memory::write16_no_mmap_or_dirty_check(phys_addr, value);
  2633. };
  2634. Ok(())
  2635. }
  2636. pub unsafe fn safe_write32(addr: i32, value: i32) -> OrPageFault<()> {
  2637. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2638. if addr & 0xFFF > 0x1000 - 4 {
  2639. virt_boundary_write32(
  2640. phys_addr,
  2641. translate_address_write(addr + 3 & !3)? | (addr as u32 + 3 & 3),
  2642. value,
  2643. );
  2644. }
  2645. else if in_mapped_range(phys_addr) {
  2646. memory::mmap_write32(phys_addr, value);
  2647. }
  2648. else {
  2649. if !can_skip_dirty_page {
  2650. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(phys_addr));
  2651. }
  2652. else {
  2653. dbg_assert!(!::jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2654. }
  2655. memory::write32_no_mmap_or_dirty_check(phys_addr, value);
  2656. };
  2657. Ok(())
  2658. }
  2659. pub unsafe fn safe_write64(addr: i32, value: u64) -> OrPageFault<()> {
  2660. if addr & 0xFFF > 0x1000 - 8 {
  2661. writable_or_pagefault(addr, 8)?;
  2662. safe_write32(addr, value as i32).unwrap();
  2663. safe_write32(addr + 4, (value >> 32) as i32).unwrap();
  2664. }
  2665. else {
  2666. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2667. if in_mapped_range(phys_addr) {
  2668. memory::mmap_write64(phys_addr, value as i32, (value >> 32) as i32);
  2669. }
  2670. else {
  2671. if !can_skip_dirty_page {
  2672. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(phys_addr));
  2673. }
  2674. else {
  2675. dbg_assert!(!::jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2676. }
  2677. memory::write64_no_mmap_or_dirty_check(phys_addr, value);
  2678. }
  2679. };
  2680. Ok(())
  2681. }
  2682. pub unsafe fn safe_write128(addr: i32, value: reg128) -> OrPageFault<()> {
  2683. if addr & 0xFFF > 0x1000 - 16 {
  2684. writable_or_pagefault(addr, 16)?;
  2685. safe_write64(addr, value.u64_0[0]).unwrap();
  2686. safe_write64(addr + 8, value.u64_0[1]).unwrap();
  2687. }
  2688. else {
  2689. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2690. if in_mapped_range(phys_addr) {
  2691. memory::mmap_write128(
  2692. phys_addr,
  2693. value.i32_0[0],
  2694. value.i32_0[1],
  2695. value.i32_0[2],
  2696. value.i32_0[3],
  2697. );
  2698. }
  2699. else {
  2700. if !can_skip_dirty_page {
  2701. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(phys_addr));
  2702. }
  2703. else {
  2704. dbg_assert!(!::jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2705. }
  2706. memory::write128_no_mmap_or_dirty_check(phys_addr, value);
  2707. }
  2708. };
  2709. Ok(())
  2710. }
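// reg8 indexes byte-wise into the 32-bit register file: indices 0-3 (AL, CL, DL, BL)
// map to byte offsets 0, 4, 8, 12 (the low byte of EAX, ECX, EDX, EBX) and indices
// 4-7 (AH, CH, DH, BH) map to offsets 1, 5, 9, 13, which is what
// index << 2 & 12 | index >> 2 & 1 computes.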
  2711. fn get_reg8_index(index: i32) -> i32 { return index << 2 & 12 | index >> 2 & 1; }
pub unsafe fn read_reg8(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg8.offset(get_reg8_index(index) as isize) as i32;
}
pub unsafe fn write_reg8(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg8.offset(get_reg8_index(index) as isize) = value as u8;
}
fn get_reg16_index(index: i32) -> i32 { return index << 1; }
pub unsafe fn read_reg16(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg16.offset(get_reg16_index(index) as isize) as i32;
}
pub unsafe fn write_reg16(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg16.offset(get_reg16_index(index) as isize) = value as u16;
}
pub unsafe fn read_reg32(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize)
}
pub unsafe fn write_reg32(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize) = value;
}
pub unsafe fn read_mmx32s(r: i32) -> i32 { *reg_mmx.offset(r as isize) as i32 }
pub unsafe fn read_mmx64s(r: i32) -> u64 { *reg_mmx.offset(r as isize) }
pub unsafe fn write_mmx_reg64(r: i32, data: u64) {
    *fxsave_store_fpu_mask &= !(1 << r);
    *reg_mmx.offset(r as isize) = data;
}
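// Illustrative note: write_mmx_reg64 clears bit r of fxsave_store_fpu_mask before storing
// the value, which appears to record that register r was last written as an MMX register,
// so state-saving code can take the raw reg_mmx value rather than an x87 form.
// Sketch, using only the helpers declared above:
//     write_mmx_reg64(3, 0x0102_0304_0506_0708);
//     assert_eq!(read_mmx64s(3), 0x0102_0304_0506_0708);
//     assert_eq!(read_mmx32s(3), 0x0506_0708); // low 32 bits only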
pub unsafe fn read_xmm_f32(r: i32) -> f32 { return (*reg_xmm.offset(r as isize)).f32_0[0]; }
pub unsafe fn read_xmm32(r: i32) -> i32 { return (*reg_xmm.offset(r as isize)).u32_0[0] as i32; }
pub unsafe fn read_xmm64s(r: i32) -> u64 { (*reg_xmm.offset(r as isize)).u64_0[0] }
pub unsafe fn read_xmm128s(r: i32) -> reg128 { return *reg_xmm.offset(r as isize); }
pub unsafe fn write_xmm_f32(r: i32, data: f32) { (*reg_xmm.offset(r as isize)).f32_0[0] = data; }
pub unsafe fn write_xmm32(r: i32, data: i32) { (*reg_xmm.offset(r as isize)).i32_0[0] = data; }
pub unsafe fn write_xmm64(r: i32, data: u64) { (*reg_xmm.offset(r as isize)).u64_0[0] = data }
pub unsafe fn write_xmm_f64(r: i32, data: f64) { (*reg_xmm.offset(r as isize)).f64_0[0] = data }
pub unsafe fn write_xmm128(r: i32, i0: i32, i1: i32, i2: i32, i3: i32) {
    let x = reg128 {
        u32_0: [i0 as u32, i1 as u32, i2 as u32, i3 as u32],
    };
    *reg_xmm.offset(r as isize) = x;
}
pub unsafe fn write_xmm128_2(r: i32, i0: u64, i1: u64) {
    *reg_xmm.offset(r as isize) = reg128 { u64_0: [i0, i1] };
}
pub unsafe fn write_xmm_reg128(r: i32, data: reg128) { *reg_xmm.offset(r as isize) = data; }
/// Set the fpu tag word to valid and the top-of-stack to 0 on mmx instructions
pub fn transition_fpu_to_mmx() {
    unsafe {
        fpu_set_tag_word(0);
        *fpu_stack_ptr = 0;
    }
}
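// Illustrative note: an x87 tag value of 0b00 means "valid", so passing 0 to
// fpu_set_tag_word marks all eight stack slots as valid, and resetting fpu_stack_ptr to 0
// makes MM0..MM7 line up with the physical registers R0..R7. This mirrors the architectural
// rule that every MMX instruction sets the full tag word to valid and TOP to 0.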
pub unsafe fn task_switch_test() -> bool {
    if 0 != *cr & (CR0_EM | CR0_TS) {
        trigger_nm();
        return false;
    }
    else {
        return true;
    };
}
pub unsafe fn set_mxcsr(new_mxcsr: i32) {
    dbg_assert!(new_mxcsr & !MXCSR_MASK == 0); // checked by caller
    if *mxcsr & MXCSR_DAZ == 0 && new_mxcsr & MXCSR_DAZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Denormals Are Zero")
    }
    if *mxcsr & MXCSR_FZ == 0 && new_mxcsr & MXCSR_FZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Flush To Zero")
    }
    let rounding_mode = new_mxcsr >> MXCSR_RC_SHIFT & 3;
    if *mxcsr >> MXCSR_RC_SHIFT & 3 == 0 && rounding_mode != 0 {
        dbg_log!(
            "Warning: Unimplemented MXCSR rounding mode: {}",
            rounding_mode
        )
    }
    let exception_mask = new_mxcsr >> 7 & 0b111111;
    if exception_mask != 0b111111 {
        dbg_log!(
            "Warning: Unimplemented MXCSR exception mask: 0b{:b}",
            exception_mask
        )
    }
    *mxcsr = new_mxcsr;
}
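// Illustrative note: the checks above only log when a feature the emulator does not
// implement is newly enabled; the value is stored unchanged either way. The bit positions
// follow the architectural MXCSR layout assumed by the constants used here: the six
// exception mask bits sit at bits 7..=12 (hence `new_mxcsr >> 7 & 0b111111`) and the
// rounding-control field is the two bits at MXCSR_RC_SHIFT. For example, writing 0x1F80
// (the reset value: all exceptions masked, round-to-nearest) triggers none of the warnings.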
#[no_mangle]
pub unsafe fn task_switch_test_jit() {
    let did_fault = !task_switch_test();
    dbg_assert!(did_fault);
}
pub unsafe fn task_switch_test_mmx() -> bool {
    if *cr.offset(4) & CR4_OSFXSR == 0 {
        dbg_log!("Warning: Unimplemented task switch test with cr4.osfxsr=0");
    }
    if 0 != *cr & CR0_EM {
        trigger_ud();
        return false;
    }
    else if 0 != *cr & CR0_TS {
        trigger_nm();
        return false;
    }
    else {
        return true;
    };
}
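// Illustrative note: the ordering above follows the architectural priority for MMX/SSE
// instructions: CR0.EM=1 raises #UD (the instructions are unavailable under emulation), and
// only otherwise does CR0.TS=1 raise #NM so the OS can lazily restore FPU/SIMD state. The
// plain task_switch_test above, used for x87 instructions, raises #NM in both cases.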
#[no_mangle]
pub unsafe fn task_switch_test_mmx_jit() {
    let did_fault = !task_switch_test_mmx();
    dbg_assert!(did_fault);
}
pub unsafe fn read_moffs() -> OrPageFault<i32> {
    // read 2 or 4 bytes from ip, depending on the address size attribute
    if is_asize_32() { read_imm32s() } else { read_imm16() }
}
#[no_mangle]
pub unsafe fn get_real_eip() -> i32 {
    // Returns the 'real' instruction pointer, without segment offset
    return *instruction_pointer - get_seg_cs();
}
pub unsafe fn get_stack_reg() -> i32 {
    if *stack_size_32 {
        return read_reg32(ESP);
    }
    else {
        return read_reg16(SP);
    };
}
#[no_mangle]
pub unsafe fn set_stack_reg(value: i32) {
    if *stack_size_32 {
        write_reg32(ESP, value)
    }
    else {
        write_reg16(SP, value)
    };
}
pub unsafe fn get_reg_asize(reg: i32) -> i32 {
    dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
    let r = read_reg32(reg);
    if is_asize_32() {
        return r;
    }
    else {
        return r & 0xFFFF;
    };
}
pub unsafe fn set_reg_asize(is_asize_32: bool, reg: i32, value: i32) {
    dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
    if is_asize_32 {
        write_reg32(reg, value)
    }
    else {
        write_reg16(reg, value)
    };
}
pub unsafe fn decr_ecx_asize(is_asize_32: bool) -> i32 {
    return if is_asize_32 {
        write_reg32(ECX, read_reg32(ECX) - 1);
        read_reg32(ECX)
    }
    else {
        write_reg16(CX, read_reg16(CX) - 1);
        read_reg16(CX)
    };
}
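// Illustrative note: with a 16-bit address size only CX is decremented, so the upper half of
// ECX is preserved and the count wraps within 16 bits. For example, with ECX = 0x1234_0000,
// decr_ecx_asize(false) writes CX = 0xFFFF and returns 0xFFFF, leaving ECX at 0x1234_FFFF,
// while decr_ecx_asize(true) on the same value would return 0x1233_FFFF.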
#[no_mangle]
pub unsafe fn set_tsc(low: u32, high: u32) {
    let new_value = low as u64 | (high as u64) << 32;
    let current_value = read_tsc();
    tsc_offset = current_value.wrapping_sub(new_value);
}
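// Illustrative note: read_tsc below reports (scaled host ticks - tsc_offset), so storing the
// difference here makes the very next read_tsc start at new_value and keep advancing at the
// host rate. Example: if read_tsc currently reports 1_000_000 and the guest reprograms the
// TSC to 100, tsc_offset becomes 999_900 and a read a few ticks later yields roughly 100
// plus the ticks elapsed since.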
#[no_mangle]
pub unsafe fn read_tsc() -> u64 {
    let n = microtick() * TSC_RATE;
    let value = (n as u64).wrapping_sub(tsc_offset);
    if true {
        return value;
    }
    else {
        if value == rdtsc_last_value {
            // don't go past 1ms
            if (rdtsc_imprecision_offset as f64) < TSC_RATE {
                rdtsc_imprecision_offset = rdtsc_imprecision_offset.wrapping_add(1)
            }
        }
        else {
            let previous_value = rdtsc_last_value.wrapping_add(rdtsc_imprecision_offset);
            if previous_value <= value {
                rdtsc_last_value = value;
                rdtsc_imprecision_offset = 0
            }
            else {
                dbg_log!(
                    "XXX: Overshot tsc prev={:x}:{:x} offset={:x}:{:x} curr={:x}:{:x}",
                    (rdtsc_last_value >> 32) as u32 as i32,
                    rdtsc_last_value as u32 as i32,
                    (rdtsc_imprecision_offset >> 32) as u32 as i32,
                    rdtsc_imprecision_offset as u32 as i32,
                    (value >> 32) as u32 as i32,
                    value as u32 as i32
                );
                dbg_assert!(false);
                // Keep current value until time catches up
            }
        }
        return rdtsc_last_value.wrapping_add(rdtsc_imprecision_offset);
    };
}
#[no_mangle]
pub unsafe fn vm86_mode() -> bool { return *flags & FLAG_VM == FLAG_VM; }
#[no_mangle]
pub unsafe fn getiopl() -> i32 { return *flags >> 12 & 3; }
#[no_mangle]
pub unsafe fn get_opstats_buffer(
    compiled: bool,
    jit_exit: bool,
    unguarded_register: bool,
    wasm_size: bool,
    opcode: u8,
    is_0f: bool,
    is_mem: bool,
    fixed_g: u8,
) -> u32 {
    let index = (is_0f as u32) << 12 | (opcode as u32) << 4 | (is_mem as u32) << 3 | fixed_g as u32;
    if compiled {
        *opstats_compiled_buffer.offset(index as isize)
    }
    else if jit_exit {
        *opstats_jit_exit_buffer.offset(index as isize)
    }
    else if unguarded_register {
        *opstats_unguarded_register_buffer.offset(index as isize)
    }
    else if wasm_size {
        *opstats_wasm_size.offset(index as isize)
    }
    else {
        *opstats_buffer.offset(index as isize)
    }
}
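// Illustrative note: the opstats index packs the opcode identity into 13 bits: bit 12 is the
// 0F prefix, bits 4..=11 the opcode byte, bit 3 the memory-operand flag and bits 0..=2 the
// fixed /r group. For example, a 0F-prefixed opcode 0xAF with a memory operand and
// fixed_g = 2 yields 1 << 12 | 0xAF << 4 | 1 << 3 | 2 = 0x1AFA, the same slot in whichever
// stats buffer is selected by the boolean arguments.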
pub unsafe fn invlpg(addr: i32) {
    let page = (addr as u32 >> 12) as i32;
    // Note: Doesn't remove this page from valid_tlb_entries: This isn't
    // necessary, because when valid_tlb_entries grows too large, it will be
    // emptied by calling clear_tlb, which removes this entry as it isn't global.
    // This however means that valid_tlb_entries can contain some invalid entries
    *tlb_data.offset(page as isize) = 0;
    *last_virt_eip = -1;
}
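// Illustrative note: invlpg only has to zero the page's slot in tlb_data to force the next
// access through do_page_translation again, and it drops the cached EIP translation.
// Example: invlpg(0xC010_1234u32 as i32) clears the entry for virtual page 0xC0101.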
#[no_mangle]
pub unsafe fn update_eflags(new_flags: i32) {
    let mut dont_update: i32 = FLAG_RF | FLAG_VM | FLAG_VIP | FLAG_VIF;
    let mut clear: i32 = !FLAG_VIP & !FLAG_VIF & FLAGS_MASK;
    if 0 != *flags & FLAG_VM {
        // other case needs to be handled in popf or iret
        dbg_assert!(getiopl() == 3);
        dont_update |= FLAG_IOPL;
        // don't clear vip or vif
        clear |= FLAG_VIP | FLAG_VIF
    }
    else {
        if !*protected_mode {
            dbg_assert!(*cpl == 0);
        }
        if 0 != *cpl {
            // cpl > 0
            // cannot update iopl
            dont_update |= FLAG_IOPL;
            if *cpl as i32 > getiopl() {
                // cpl > iopl
                // cannot update interrupt flag
                dont_update |= FLAG_INTERRUPT
            }
        }
    }
    *flags = (new_flags ^ (*flags ^ new_flags) & dont_update) & clear | FLAGS_DEFAULT;
    *flags_changed = 0;
    if *flags & FLAG_TRAP != 0 {
        dbg_log!("Not supported: trap flag");
    }
    *flags &= !FLAG_TRAP;
}
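// Illustrative note: the merge expression works bit by bit. For a bit in dont_update,
// new ^ (old ^ new) = old, so the previous value is kept; for every other bit, new ^ 0 = new.
// The result is then restricted to the defined bits in `clear`, and FLAGS_DEFAULT supplies
// the always-set bits. Example: at CPL 3 with IOPL 0, FLAG_INTERRUPT is in dont_update, so a
// popf that tries to set IF leaves IF unchanged while still updating the arithmetic flags.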
#[no_mangle]
pub unsafe fn get_valid_tlb_entries_count() -> i32 {
    if !cfg!(feature = "profiler") {
        return 0;
    }
    let mut result: i32 = 0;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        let entry = *tlb_data.offset(page as isize);
        if 0 != entry {
            result += 1
        }
    }
    return result;
}
#[no_mangle]
pub unsafe fn get_valid_global_tlb_entries_count() -> i32 {
    if !cfg!(feature = "profiler") {
        return 0;
    }
    let mut result: i32 = 0;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        let entry = *tlb_data.offset(page as isize);
        if 0 != entry & TLB_GLOBAL {
            result += 1
        }
    }
    return result;
}
pub unsafe fn translate_address_system_read(address: i32) -> OrPageFault<u32> {
    let entry = *tlb_data.offset((address as u32 >> 12) as isize);
    if 0 != entry & TLB_VALID {
        return Ok((entry & !0xFFF ^ address) as u32);
    }
    else {
        return Ok((do_page_translation(address, false, false)? | address & 0xFFF) as u32);
    };
}
pub unsafe fn translate_address_system_write(address: i32) -> OrPageFault<u32> {
    let entry = *tlb_data.offset((address as u32 >> 12) as isize);
    if entry & (TLB_VALID | TLB_READONLY) == TLB_VALID {
        return Ok((entry & !0xFFF ^ address) as u32);
    }
    else {
        return Ok((do_page_translation(address, true, false)? | address & 0xFFF) as u32);
    };
}
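// Illustrative note: the fast path relies on how the TLB entries are built when they are
// filled: the non-flag bits hold (physical page XOR virtual page) shifted into place, so
// `entry & !0xFFF ^ address` recovers the physical page while keeping the low 12 offset bits
// of `address`. These "system" variants pass user = false to do_page_translation on a miss,
// i.e. they translate with supervisor privilege, which is what accesses such as descriptor
// table reads need regardless of the current CPL.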
#[no_mangle]
pub unsafe fn trigger_np(code: i32) {
    dbg_log!("#np");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if cpu_exception_hook(CPU_EXCEPTION_NP) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_NP, false, Some(code));
}
#[no_mangle]
pub unsafe fn trigger_ss(code: i32) {
    dbg_log!("#ss");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if cpu_exception_hook(CPU_EXCEPTION_SS) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_SS, false, Some(code));
}
#[no_mangle]
pub unsafe fn store_current_tsc() { *current_tsc = read_tsc(); }
#[no_mangle]
pub unsafe fn handle_irqs() {
    if *flags & FLAG_INTERRUPT != 0 {
        pic_acknowledge()
    }
}
#[no_mangle]
pub unsafe fn pic_call_irq(interrupt_nr: i32) {
    *previous_ip = *instruction_pointer; // XXX: What if called after instruction (port IO)
    call_interrupt_vector(interrupt_nr, false, None);
}