cpu.rs
#![allow(non_upper_case_globals)]
extern "C" {
    #[no_mangle]
    fn cpu_exception_hook(interrupt: i32) -> bool;
    #[no_mangle]
    fn do_task_switch(selector: i32, has_error_code: bool, error_code: i32);
    //#[no_mangle]
    //fn logop(addr: i32, op: i32);
    #[no_mangle]
    fn microtick() -> f64;
    #[no_mangle]
    fn call_indirect1(f: i32, x: u16);
    #[no_mangle]
    fn pic_acknowledge();
    #[no_mangle]
    pub fn io_port_read8(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_read16(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_read32(port: i32) -> i32;
    #[no_mangle]
    pub fn io_port_write8(port: i32, value: i32);
    #[no_mangle]
    pub fn io_port_write16(port: i32, value: i32);
    #[no_mangle]
    pub fn io_port_write32(port: i32, value: i32);
}
use ::jit;
use cpu::fpu::fpu_set_tag_word;
use cpu::global_pointers::*;
pub use cpu::imports::mem8;
use cpu::memory;
use cpu::memory::{
    in_mapped_range, read8, read16, read32s, read64s, read128, read_aligned32, write8,
    write_aligned32,
};
use cpu::misc_instr::{
    adjust_stack_reg, get_stack_pointer, getaf, getcf, getof, getpf, getsf, getzf, pop16, pop32s,
    push16, push32,
};
use cpu::modrm::{resolve_modrm16, resolve_modrm32};
use page::Page;
use paging::OrPageFault;
use profiler;
use profiler::stat::*;
use state_flags::CachedStateFlags;
pub use util::dbg_trace;
/// The offset for our generated functions in the wasm table. Every index less than this is
/// reserved for rustc's indirect functions
pub const WASM_TABLE_OFFSET: u32 = 1024;
#[derive(Copy, Clone)]
#[repr(C)]
pub union reg128 {
    pub i8_0: [i8; 16],
    pub i16_0: [i16; 8],
    pub i32_0: [i32; 4],
    pub i64_0: [i64; 2],
    pub u8_0: [u8; 16],
    pub u16_0: [u16; 8],
    pub u32_0: [u32; 4],
    pub u64_0: [u64; 2],
    pub f32_0: [f32; 4],
    pub f64_0: [f64; 2],
}
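// Illustrative sketch (not from the original source): the union above provides several
// typed views of the same 128-bit XMM value. Reading any field is unsafe because the
// compiler cannot know which view was last written; a little-endian layout is assumed
// here, as on wasm32 and x86.
#[cfg(test)]
mod reg128_example {
    use super::reg128;

    #[test]
    fn alias_views() {
        let r = reg128 { i32_0: [1, 2, 3, 4] };
        unsafe {
            // The first i64 lane aliases the first two i32 lanes.
            assert_eq!(r.i64_0[0], 1 | (2i64 << 32));
            assert_eq!(r.u32_0[2], 3);
        }
    }
}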
/// Setting this to true will make execution extremely slow
pub const CHECK_MISSED_ENTRY_POINTS: bool = false;
pub const INTERPRETER_ITERATION_LIMIT: u32 = 1000;
pub const FLAG_SUB: i32 = -0x8000_0000;
pub const FLAG_CARRY: i32 = 1;
pub const FLAG_PARITY: i32 = 4;
pub const FLAG_ADJUST: i32 = 16;
pub const FLAG_ZERO: i32 = 64;
pub const FLAG_SIGN: i32 = 128;
pub const FLAG_TRAP: i32 = 256;
pub const FLAG_INTERRUPT: i32 = 512;
pub const FLAG_DIRECTION: i32 = 1024;
pub const FLAG_OVERFLOW: i32 = 2048;
pub const FLAG_IOPL: i32 = 1 << 12 | 1 << 13;
pub const FLAG_NT: i32 = 1 << 14;
pub const FLAG_RF: i32 = 1 << 16;
pub const FLAG_VM: i32 = 1 << 17;
pub const FLAG_AC: i32 = 1 << 18;
pub const FLAG_VIF: i32 = 1 << 19;
pub const FLAG_VIP: i32 = 1 << 20;
pub const FLAG_ID: i32 = 1 << 21;
pub const FLAGS_DEFAULT: i32 = 1 << 1;
pub const FLAGS_MASK: i32 = FLAG_CARRY
    | FLAG_PARITY
    | FLAG_ADJUST
    | FLAG_ZERO
    | FLAG_SIGN
    | FLAG_TRAP
    | FLAG_INTERRUPT
    | FLAG_DIRECTION
    | FLAG_OVERFLOW
    | FLAG_IOPL
    | FLAG_NT
    | FLAG_RF
    | FLAG_VM
    | FLAG_AC
    | FLAG_VIF
    | FLAG_VIP
    | FLAG_ID;
pub const FLAGS_ALL: i32 =
    FLAG_CARRY | FLAG_PARITY | FLAG_ADJUST | FLAG_ZERO | FLAG_SIGN | FLAG_OVERFLOW;
pub const OPSIZE_8: i32 = 7;
pub const OPSIZE_16: i32 = 15;
pub const OPSIZE_32: i32 = 31;
pub const EAX: i32 = 0;
pub const ECX: i32 = 1;
pub const EDX: i32 = 2;
pub const EBX: i32 = 3;
pub const ESP: i32 = 4;
pub const EBP: i32 = 5;
pub const ESI: i32 = 6;
pub const EDI: i32 = 7;
pub const AX: i32 = 0;
pub const CX: i32 = 1;
pub const DX: i32 = 2;
pub const BX: i32 = 3;
pub const SP: i32 = 4;
pub const BP: i32 = 5;
pub const SI: i32 = 6;
pub const DI: i32 = 7;
pub const AL: i32 = 0;
pub const CL: i32 = 1;
pub const DL: i32 = 2;
pub const BL: i32 = 3;
pub const AH: i32 = 4;
pub const CH: i32 = 5;
pub const DH: i32 = 6;
pub const BH: i32 = 7;
pub const ES: i32 = 0;
pub const CS: i32 = 1;
pub const SS: i32 = 2;
pub const DS: i32 = 3;
pub const FS: i32 = 4;
pub const GS: i32 = 5;
pub const TR: i32 = 6;
pub const LDTR: i32 = 7;
pub const PAGE_TABLE_PRESENT_MASK: i32 = 1 << 0;
pub const PAGE_TABLE_RW_MASK: i32 = 1 << 1;
pub const PAGE_TABLE_USER_MASK: i32 = 1 << 2;
pub const PAGE_TABLE_ACCESSED_MASK: i32 = 1 << 5;
pub const PAGE_TABLE_DIRTY_MASK: i32 = 1 << 6;
pub const PAGE_TABLE_PSE_MASK: i32 = 1 << 7;
pub const PAGE_TABLE_GLOBAL_MASK: i32 = 1 << 8;
pub const MMAP_BLOCK_BITS: i32 = 17;
pub const MMAP_BLOCK_SIZE: i32 = 1 << MMAP_BLOCK_BITS;
pub const CR0_PE: i32 = 1;
pub const CR0_MP: i32 = 1 << 1;
pub const CR0_EM: i32 = 1 << 2;
pub const CR0_TS: i32 = 1 << 3;
pub const CR0_ET: i32 = 1 << 4;
pub const CR0_WP: i32 = 1 << 16;
pub const CR0_AM: i32 = 1 << 18;
pub const CR0_NW: i32 = 1 << 29;
pub const CR0_CD: i32 = 1 << 30;
pub const CR0_PG: i32 = 1 << 31;
pub const CR4_VME: i32 = 1;
pub const CR4_PVI: i32 = 1 << 1;
pub const CR4_TSD: i32 = 1 << 2;
pub const CR4_PSE: i32 = 1 << 4;
pub const CR4_DE: i32 = 1 << 3;
pub const CR4_PAE: i32 = 1 << 5;
pub const CR4_PGE: i32 = 1 << 7;
pub const CR4_OSFXSR: i32 = 1 << 9;
pub const CR4_OSXMMEXCPT: i32 = 1 << 10;
pub const IA32_SYSENTER_CS: i32 = 372;
pub const IA32_SYSENTER_ESP: i32 = 373;
pub const IA32_SYSENTER_EIP: i32 = 374;
pub const IA32_TIME_STAMP_COUNTER: i32 = 16;
pub const IA32_PLATFORM_ID: i32 = 23;
pub const IA32_APIC_BASE_MSR: i32 = 27;
pub const IA32_BIOS_SIGN_ID: i32 = 139;
pub const MSR_PLATFORM_INFO: i32 = 206;
pub const MSR_MISC_FEATURE_ENABLES: i32 = 320;
pub const IA32_MISC_ENABLE: i32 = 416;
pub const IA32_RTIT_CTL: i32 = 1392;
pub const MSR_SMI_COUNT: i32 = 52;
pub const MSR_TEST_CTRL: i32 = 0x33;
pub const MSR_IA32_FEAT_CTL: i32 = 0x3a;
pub const IA32_MCG_CAP: i32 = 377;
pub const IA32_KERNEL_GS_BASE: i32 = 0xC0000101u32 as i32;
pub const MSR_PKG_C2_RESIDENCY: i32 = 1549;
pub const IA32_APIC_BASE_BSP: i32 = 1 << 8;
pub const IA32_APIC_BASE_EXTD: i32 = 1 << 10;
pub const IA32_APIC_BASE_EN: i32 = 1 << 11;
pub const APIC_ADDRESS: i32 = 0xFEE00000u32 as i32;
pub const SEG_PREFIX_NONE: i32 = -1;
pub const SEG_PREFIX_ZERO: i32 = 7;
pub const PREFIX_MASK_REP: i32 = 24;
pub const PREFIX_REPZ: i32 = 8;
pub const PREFIX_REPNZ: i32 = 16;
pub const PREFIX_MASK_SEGMENT: i32 = 7;
pub const PREFIX_MASK_OPSIZE: i32 = 32;
pub const PREFIX_MASK_ADDRSIZE: i32 = 64;
pub const PREFIX_F2: i32 = PREFIX_REPNZ;
pub const PREFIX_F3: i32 = PREFIX_REPZ;
pub const PREFIX_66: i32 = PREFIX_MASK_OPSIZE;
pub const LOG_CPU: i32 = 2;
pub const MXCSR_MASK: i32 = 0xffff;
pub const MXCSR_FZ: i32 = 1 << 15;
pub const MXCSR_DAZ: i32 = 1 << 6;
pub const MXCSR_RC_SHIFT: i32 = 13;
pub const VALID_TLB_ENTRY_MAX: i32 = 10000;
pub const TLB_VALID: i32 = 1 << 0;
pub const TLB_READONLY: i32 = 1 << 1;
pub const TLB_NO_USER: i32 = 1 << 2;
pub const TLB_IN_MAPPED_RANGE: i32 = 1 << 3;
pub const TLB_GLOBAL: i32 = 1 << 4;
pub const TLB_HAS_CODE: i32 = 1 << 5;
pub const IVT_SIZE: u32 = 0x400;
pub const CPU_EXCEPTION_DE: i32 = 0;
pub const CPU_EXCEPTION_DB: i32 = 1;
pub const CPU_EXCEPTION_NMI: i32 = 2;
pub const CPU_EXCEPTION_BP: i32 = 3;
pub const CPU_EXCEPTION_OF: i32 = 4;
pub const CPU_EXCEPTION_BR: i32 = 5;
pub const CPU_EXCEPTION_UD: i32 = 6;
pub const CPU_EXCEPTION_NM: i32 = 7;
pub const CPU_EXCEPTION_DF: i32 = 8;
pub const CPU_EXCEPTION_TS: i32 = 10;
pub const CPU_EXCEPTION_NP: i32 = 11;
pub const CPU_EXCEPTION_SS: i32 = 12;
pub const CPU_EXCEPTION_GP: i32 = 13;
pub const CPU_EXCEPTION_PF: i32 = 14;
pub const CPU_EXCEPTION_MF: i32 = 16;
pub const CPU_EXCEPTION_AC: i32 = 17;
pub const CPU_EXCEPTION_MC: i32 = 18;
pub const CPU_EXCEPTION_XM: i32 = 19;
pub const CPU_EXCEPTION_VE: i32 = 20;
pub const CHECK_TLB_INVARIANTS: bool = false;
pub const DEBUG: bool = cfg!(debug_assertions);
pub const LOOP_COUNTER: i32 = 20011;
pub const TSC_RATE: f64 = 1_000_000.0;
pub static mut jit_block_boundary: bool = false;
pub static mut must_not_fault: bool = false;
pub static mut rdtsc_imprecision_offset: u64 = 0;
pub static mut rdtsc_last_value: u64 = 0;
pub static mut tsc_offset: u64 = 0;
pub static mut tlb_data: [i32; 0x400000] = [0; 0x400000];
pub static mut valid_tlb_entries: [i32; 10000] = [0; 10000];
pub static mut valid_tlb_entries_count: i32 = 0;
pub static mut apic_enabled: bool = false;
pub static mut in_jit: bool = false;
pub enum LastJump {
    Interrupt {
        phys_addr: u32,
        int: u8,
        software: bool,
        error: Option<u32>,
    },
    Compiled {
        phys_addr: u32,
    },
    Interpreted {
        phys_addr: u32,
    },
    None,
}
impl LastJump {
    pub fn phys_address(&self) -> Option<u32> {
        match self {
            LastJump::Interrupt { phys_addr, .. } => Some(*phys_addr),
            LastJump::Compiled { phys_addr } => Some(*phys_addr),
            LastJump::Interpreted { phys_addr } => Some(*phys_addr),
            LastJump::None => None,
        }
    }
    pub fn name(&self) -> &'static str {
        match self {
            LastJump::Interrupt { .. } => "interrupt",
            LastJump::Compiled { .. } => "compiled",
            LastJump::Interpreted { .. } => "interpreted",
            LastJump::None => "none",
        }
    }
}
pub static mut debug_last_jump: LastJump = LastJump::None;
pub struct SegmentSelector {
    raw: u16,
}
impl SegmentSelector {
    pub fn of_u16(raw: u16) -> SegmentSelector { SegmentSelector { raw } }
    pub fn rpl(&self) -> u8 { (self.raw & 3) as u8 }
    pub fn is_gdt(&self) -> bool { (self.raw & 4) == 0 }
    pub fn descriptor_offset(&self) -> u16 { (self.raw & !7) as u16 }
    pub fn is_null(&self) -> bool { self.is_gdt() && self.descriptor_offset() == 0 }
}
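// Illustrative sketch (not from the original source): decoding a hypothetical selector
// value with the accessors above. The value 0x2b (index 5, GDT, RPL 3) is only an example.
#[cfg(test)]
mod segment_selector_example {
    use super::SegmentSelector;

    #[test]
    fn decode_example_selector() {
        let sel = SegmentSelector::of_u16(0x2b);
        assert_eq!(sel.rpl(), 3);
        assert!(sel.is_gdt());
        // Index 5 means the descriptor starts at byte offset 5 * 8 = 0x28 in the GDT.
        assert_eq!(sel.descriptor_offset(), 0x28);
        assert!(!sel.is_null());
    }
}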
// Used to indicate early that the selector cannot be used to fetch a descriptor
#[derive(PartialEq)]
pub enum SelectorNullOrInvalid {
    IsNull,
    IsInvalid,
}
pub struct SegmentDescriptor {
    raw: u64,
}
impl SegmentDescriptor {
    pub fn of_u64(raw: u64) -> SegmentDescriptor { SegmentDescriptor { raw } }
    pub fn base(&self) -> i32 {
        ((self.raw >> 16) & 0xffff | (self.raw & 0xff_00000000) >> 16 | (self.raw >> 56 << 24))
            as i32
    }
    pub fn limit(&self) -> u32 { (self.raw & 0xffff | ((self.raw >> 48) & 0xf) << 16) as u32 }
    pub fn access_byte(&self) -> u8 { ((self.raw >> 40) & 0xff) as u8 }
    pub fn flags(&self) -> u8 { ((self.raw >> 48 >> 4) & 0xf) as u8 }
    pub fn is_system(&self) -> bool { self.access_byte() & 0x10 == 0 }
    pub fn system_type(&self) -> u8 { self.access_byte() & 0xF }
    pub fn is_rw(&self) -> bool { self.access_byte() & 2 == 2 }
    pub fn is_dc(&self) -> bool { self.access_byte() & 4 == 4 }
    pub fn is_executable(&self) -> bool { self.access_byte() & 8 == 8 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 == 0x80 }
    pub fn is_writable(&self) -> bool { self.is_rw() && !self.is_executable() }
    pub fn is_readable(&self) -> bool { self.is_rw() || !self.is_executable() }
    pub fn is_conforming_executable(&self) -> bool { self.is_dc() && self.is_executable() }
    pub fn dpl(&self) -> u8 { (self.access_byte() >> 5) & 3 }
    pub fn is_32(&self) -> bool { self.flags() & 4 == 4 }
    pub fn effective_limit(&self) -> u32 {
        if self.flags() & 8 == 8 { self.limit() << 12 | 0xFFF } else { self.limit() }
    }
}
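// Illustrative sketch (not from the original source): a hypothetical flat 32-bit code
// descriptor (base 0, limit 0xFFFFF, access byte 0x9A, flags 0xC) decoded with the
// accessors above.
#[cfg(test)]
mod segment_descriptor_example {
    use super::SegmentDescriptor;

    #[test]
    fn decode_flat_code_descriptor() {
        let desc = SegmentDescriptor::of_u64(0x00CF_9A00_0000_FFFF);
        assert_eq!(desc.base(), 0);
        assert_eq!(desc.limit(), 0xFFFFF);
        assert!(desc.is_executable() && desc.is_present() && !desc.is_system());
        assert_eq!(desc.dpl(), 0);
        assert!(desc.is_32());
        // With the granularity bit set, the limit is in 4K pages, giving a 4 GiB span.
        assert_eq!(desc.effective_limit(), 0xFFFF_FFFF);
    }
}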
pub struct InterruptDescriptor {
    raw: u64,
}
impl InterruptDescriptor {
    pub fn of_u64(raw: u64) -> InterruptDescriptor { InterruptDescriptor { raw } }
    pub fn offset(&self) -> i32 { (self.raw & 0xffff | self.raw >> 32 & 0xffff0000) as i32 }
    pub fn selector(&self) -> u16 { (self.raw >> 16 & 0xffff) as u16 }
    pub fn access_byte(&self) -> u8 { (self.raw >> 40 & 0xff) as u8 }
    pub fn dpl(&self) -> u8 { (self.access_byte() >> 5 & 3) as u8 }
    pub fn gate_type(&self) -> u8 { self.access_byte() & 7 }
    pub fn is_32(&self) -> bool { self.access_byte() & 8 == 8 }
    pub fn is_present(&self) -> bool { self.access_byte() & 0x80 == 0x80 }
    pub fn reserved_zeros_are_valid(&self) -> bool { self.access_byte() & 16 == 0 }
    const TASK_GATE: u8 = 0b101;
    const INTERRUPT_GATE: u8 = 0b110;
    const TRAP_GATE: u8 = 0b111;
}
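// Illustrative sketch (not from the original source): decoding a hypothetical 32-bit
// interrupt gate (offset 0x0040_1000, selector 0x0008, access byte 0x8E) with the
// accessors above.
#[cfg(test)]
mod interrupt_descriptor_example {
    use super::InterruptDescriptor;

    #[test]
    fn decode_interrupt_gate() {
        let gate = InterruptDescriptor::of_u64(0x0040_8E00_0008_1000);
        assert_eq!(gate.offset(), 0x0040_1000);
        assert_eq!(gate.selector(), 0x0008);
        assert_eq!(gate.dpl(), 0);
        // Access byte 0x8E: present, DPL 0, 32-bit interrupt gate.
        assert_eq!(gate.gate_type(), InterruptDescriptor::INTERRUPT_GATE);
        assert!(gate.is_32() && gate.is_present());
        assert!(gate.reserved_zeros_are_valid());
    }
}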
#[no_mangle]
pub unsafe fn switch_cs_real_mode(selector: i32) {
    dbg_assert!(!*protected_mode || vm86_mode());
    *sreg.offset(CS as isize) = selector as u16;
    *segment_is_null.offset(CS as isize) = false;
    *segment_offsets.offset(CS as isize) = selector << 4;
    update_cs_size(false);
}
pub unsafe fn get_tss_stack_addr(dpl: u8) -> OrPageFault<u32> {
    let (tss_stack_offset, page_boundary) = if *tss_size_32 {
        (((dpl << 3) + 4) as u32, 0x1000 - 6)
    }
    else {
        (((dpl << 2) + 2) as u32, 0x1000 - 4)
    };
    if tss_stack_offset + 5 > *segment_limits.offset(TR as isize) {
        panic!("#TS handler");
    }
    let tss_stack_addr = *segment_offsets.offset(TR as isize) as u32 + tss_stack_offset;
    dbg_assert!(tss_stack_addr & 0xFFF <= page_boundary);
    Ok(translate_address_system_read(tss_stack_addr as i32)?)
}
pub unsafe fn iret16() { iret(true); }
pub unsafe fn iret32() { iret(false); }
pub unsafe fn iret(is_16: bool) {
    if vm86_mode() && getiopl() < 3 {
        // vm86 mode, iopl != 3
        dbg_log!("#gp iret vm86 mode, iopl != 3");
        trigger_gp(0);
        return;
    }
    let (new_eip, new_cs, mut new_flags) = if is_16 {
        (
            return_on_pagefault!(safe_read16(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(2))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
        )
    }
    else {
        (
            return_on_pagefault!(safe_read32s(get_stack_pointer(0))),
            return_on_pagefault!(safe_read16(get_stack_pointer(4))),
            return_on_pagefault!(safe_read32s(get_stack_pointer(8))),
        )
    };
    if !*protected_mode || (vm86_mode() && getiopl() == 3) {
        if new_eip as u32 & 0xFFFF0000 != 0 {
            panic!("#GP handler");
        }
        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_eip;
        if is_16 {
            update_eflags(new_flags | *flags & !0xFFFF);
            adjust_stack_reg(3 * 2);
        }
        else {
            if !*protected_mode {
                update_eflags((new_flags & 0x257FD5) | (*flags & 0x1A0000));
            }
            else {
                update_eflags(new_flags);
            }
            adjust_stack_reg(3 * 4);
        }
        handle_irqs();
        return;
    }
    dbg_assert!(!vm86_mode());
    if *flags & FLAG_NT != 0 {
        if DEBUG {
            panic!("NT");
        }
        trigger_gp(0);
        return;
    }
    if new_flags & FLAG_VM != 0 {
        if *cpl == 0 {
            // return to virtual 8086 mode
            // vm86 cannot be set in 16 bit flag
            dbg_assert!(!is_16);
            dbg_assert!((new_eip & !0xFFFF) == 0);
            let temp_esp = return_on_pagefault!(safe_read32s(get_stack_pointer(12)));
            let temp_ss = return_on_pagefault!(safe_read16(get_stack_pointer(16)));
            let new_es = return_on_pagefault!(safe_read16(get_stack_pointer(20)));
            let new_ds = return_on_pagefault!(safe_read16(get_stack_pointer(24)));
            let new_fs = return_on_pagefault!(safe_read16(get_stack_pointer(28)));
            let new_gs = return_on_pagefault!(safe_read16(get_stack_pointer(32)));
            // no exceptions below
            update_eflags(new_flags);
            *flags |= FLAG_VM;
            switch_cs_real_mode(new_cs);
            *instruction_pointer = get_seg_cs() + (new_eip & 0xFFFF);
            if !switch_seg(ES, new_es)
                || !switch_seg(DS, new_ds)
                || !switch_seg(FS, new_fs)
                || !switch_seg(GS, new_gs)
            {
                // XXX: Should be checked before side effects
                dbg_assert!(false);
            }
            adjust_stack_reg(9 * 4); // 9 dwords: eip, cs, flags, esp, ss, es, ds, fs, gs
            write_reg32(ESP, temp_esp);
            if !switch_seg(SS, temp_ss) {
                // XXX
                dbg_assert!(false);
            }
            *cpl = 3;
            cpl_changed();
            update_cs_size(false);
            // iret end
            return;
        }
        else {
            dbg_log!("vm86 flag ignored because cpl != 0");
            new_flags &= !FLAG_VM;
        }
    }
    // protected mode return
    let (cs_descriptor, cs_selector) = match return_on_pagefault!(lookup_segment_selector(new_cs)) {
        Ok((desc, sel)) => (desc, sel),
        Err(selector_unusable) => match selector_unusable {
            SelectorNullOrInvalid::IsNull => {
                panic!("Unimplemented: CS selector is null");
            },
            SelectorNullOrInvalid::IsInvalid => {
                panic!("Unimplemented: CS selector is invalid");
            },
        },
    };
    dbg_assert!(new_eip as u32 <= cs_descriptor.effective_limit());
    if !cs_descriptor.is_present() {
        panic!("not present");
    }
    if !cs_descriptor.is_executable() {
        panic!("not exec");
    }
    if cs_selector.rpl() < *cpl {
        panic!("rpl < cpl");
    }
    if cs_descriptor.is_dc() && cs_descriptor.dpl() > cs_selector.rpl() {
        panic!("conforming and dpl > rpl");
    }
    if !cs_descriptor.is_dc() && cs_selector.rpl() != cs_descriptor.dpl() {
        dbg_log!(
            "#gp iret: non-conforming cs and rpl != dpl, dpl={} rpl={}",
            cs_descriptor.dpl(),
            cs_selector.rpl()
        );
        trigger_gp(new_cs & !3);
        return;
    }
    if cs_selector.rpl() > *cpl {
        // outer privilege return
        let (temp_esp, temp_ss) = if is_16 {
            (
                return_on_pagefault!(safe_read16(get_stack_pointer(6))),
                return_on_pagefault!(safe_read16(get_stack_pointer(8))),
            )
        }
        else {
            (
                return_on_pagefault!(safe_read32s(get_stack_pointer(12))),
                return_on_pagefault!(safe_read16(get_stack_pointer(16))),
            )
        };
        let (ss_descriptor, ss_selector) =
            match return_on_pagefault!(lookup_segment_selector(temp_ss)) {
                Ok((desc, sel)) => (desc, sel),
                Err(selector_unusable) => match selector_unusable {
                    SelectorNullOrInvalid::IsNull => {
                        dbg_log!("#GP for loading 0 in SS sel={:x}", temp_ss);
                        dbg_trace();
                        trigger_gp(0);
                        return;
                    },
                    SelectorNullOrInvalid::IsInvalid => {
                        dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
                        trigger_gp(temp_ss & !3);
                        return;
                    },
                },
            };
        let new_cpl = cs_selector.rpl();
        if ss_descriptor.is_system()
            || ss_selector.rpl() != new_cpl
            || !ss_descriptor.is_writable()
            || ss_descriptor.dpl() != new_cpl
        {
            dbg_log!("#GP for loading invalid in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_gp(temp_ss & !3);
            return;
        }
        if !ss_descriptor.is_present() {
            dbg_log!("#SS for loading non-present in SS sel={:x}", temp_ss);
            dbg_trace();
            trigger_ss(temp_ss & !3);
            return;
        }
        // no exceptions below
        if is_16 {
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            update_eflags(new_flags);
        }
        *cpl = cs_selector.rpl();
        cpl_changed();
        if !switch_seg(SS, temp_ss) {
            // XXX
            dbg_assert!(false);
        }
        set_stack_reg(temp_esp);
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }
        // XXX: Set segment to 0 if it's not usable in the new cpl
        // XXX: Use cached segment information
        // ...
    }
    else if cs_selector.rpl() == *cpl {
        // same privilege return
        // no exceptions below
        if is_16 {
            adjust_stack_reg(3 * 2);
            update_eflags(new_flags | *flags & !0xFFFF);
        }
        else {
            adjust_stack_reg(3 * 4);
            update_eflags(new_flags);
        }
        // update vip and vif, which are not changed by update_eflags
        if *cpl == 0 && !is_16 {
            *flags = *flags & !FLAG_VIF & !FLAG_VIP | (new_flags & (FLAG_VIF | FLAG_VIP));
        }
    }
    else {
        dbg_assert!(false);
    }
    *sreg.offset(CS as isize) = new_cs as u16;
    dbg_assert!((new_cs & 3) == *cpl as i32);
    update_cs_size(cs_descriptor.is_32());
    *segment_limits.offset(CS as isize) = cs_descriptor.effective_limit();
    *segment_offsets.offset(CS as isize) = cs_descriptor.base();
    *instruction_pointer = new_eip + get_seg_cs();
    // iret end
    handle_irqs();
}
pub unsafe fn call_interrupt_vector(
    interrupt_nr: i32,
    is_software_int: bool,
    error_code: Option<i32>,
) {
    // we have to leave hlt_loop at some point, this is a
    // good place to do it
    *in_hlt = false;
    if *protected_mode {
        if vm86_mode() && *cr.offset(4) & CR4_VME != 0 {
            panic!("Unimplemented: VME");
        }
        if vm86_mode() && is_software_int && getiopl() < 3 {
            dbg_log!("call_interrupt_vector #GP. vm86 && software int && iopl < 3");
            dbg_trace();
            trigger_gp(0);
            return;
        }
        if interrupt_nr << 3 | 7 > *idtr_size {
            dbg_log!("interrupt_nr={:x} idtr_size={:x}", interrupt_nr, *idtr_size);
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }
        let descriptor_address = return_on_pagefault!(translate_address_system_read(
            *idtr_offset + (interrupt_nr << 3)
        ));
        let descriptor = InterruptDescriptor::of_u64(read64s(descriptor_address) as u64);
        let mut offset = descriptor.offset();
        let selector = descriptor.selector() as i32;
        let dpl = descriptor.dpl();
        let gate_type = descriptor.gate_type();
        if !descriptor.is_present() {
            // present bit not set
            panic!("Unimplemented: #NP handler");
        }
        if is_software_int && dpl < *cpl {
            dbg_log!("#gp software interrupt ({:x}) and dpl < cpl", interrupt_nr);
            dbg_trace();
            trigger_gp(interrupt_nr << 3 | 2);
            return;
        }
        if gate_type == InterruptDescriptor::TASK_GATE {
            // task gate
            dbg_log!(
                "interrupt to task gate: int={:x} sel={:x} dpl={}",
                interrupt_nr,
                selector,
                dpl
            );
            dbg_trace();
            do_task_switch(selector, error_code.is_some(), error_code.unwrap_or(0));
            return;
        }
        let is_valid_type = gate_type == InterruptDescriptor::TRAP_GATE
            || gate_type == InterruptDescriptor::INTERRUPT_GATE;
        if !is_valid_type || !descriptor.reserved_zeros_are_valid() {
            // invalid gate_type
            dbg_log!(
                "gate type invalid or reserved 0s violated. gate_type=0b{:b} raw={:b}",
                gate_type,
                descriptor.raw
            );
            dbg_log!(
                "addr={:x} offset={:x} selector={:x}",
                descriptor_address,
                offset,
                selector
            );
            dbg_trace();
            panic!("Unimplemented: #GP handler");
        }
        let cs_segment_descriptor = match return_on_pagefault!(lookup_segment_selector(selector)) {
            Ok((desc, _)) => desc,
            Err(selector_unusable) => match selector_unusable {
                SelectorNullOrInvalid::IsNull => {
                    dbg_log!("is null");
                    panic!("Unimplemented: #GP handler");
                },
                SelectorNullOrInvalid::IsInvalid => {
                    dbg_log!("is invalid");
                    panic!("Unimplemented: #GP handler (error code)");
                },
            },
        };
        dbg_assert!(offset as u32 <= cs_segment_descriptor.effective_limit());
        if !cs_segment_descriptor.is_executable() || cs_segment_descriptor.dpl() > *cpl {
            dbg_log!("not exec");
            panic!("Unimplemented: #GP handler");
        }
        if !cs_segment_descriptor.is_present() {
            // kvm-unit-test
            dbg_log!("not present");
            trigger_np(interrupt_nr << 3 | 2);
            return;
        }
        let old_flags = get_eflags();
        if !cs_segment_descriptor.is_dc() && cs_segment_descriptor.dpl() < *cpl {
            // inter privilege level interrupt
            // interrupt from vm86 mode
            if old_flags & FLAG_VM != 0 && cs_segment_descriptor.dpl() != 0 {
                panic!("Unimplemented: #GP handler for non-0 cs segment dpl when in vm86 mode");
            }
            let tss_stack_addr =
                return_on_pagefault!(get_tss_stack_addr(cs_segment_descriptor.dpl()));
            let new_esp = read32s(tss_stack_addr);
            let new_ss = read16(tss_stack_addr + if *tss_size_32 { 4 } else { 2 });
            let (ss_segment_descriptor, ss_segment_selector) =
                match return_on_pagefault!(lookup_segment_selector(new_ss)) {
                    Ok((desc, sel)) => (desc, sel),
                    Err(_) => {
                        panic!("Unimplemented: #TS handler");
                    },
                };
            // Disabled: Incorrect handling of direction bit
            // See http://css.csail.mit.edu/6.858/2014/readings/i386/s06_03.htm
            //if !((new_esp >>> 0) <= ss_segment_descriptor.effective_limit())
            //    debugger;
            //dbg_assert!((new_esp >>> 0) <= ss_segment_descriptor.effective_limit());
            dbg_assert!(!ss_segment_descriptor.is_system() && ss_segment_descriptor.is_writable());
            if ss_segment_selector.rpl() != cs_segment_descriptor.dpl() {
                panic!("Unimplemented: #TS handler");
            }
            if ss_segment_descriptor.dpl() != cs_segment_descriptor.dpl()
                || !ss_segment_descriptor.is_rw()
            {
                panic!("Unimplemented: #TS handler");
            }
            if !ss_segment_descriptor.is_present() {
                panic!("Unimplemented: #TS handler");
            }
            let old_esp = read_reg32(ESP);
            let old_ss = *sreg.offset(SS as isize) as i32;
            let error_code_space = if error_code.is_some() { 1 } else { 0 };
            let vm86_space = if (old_flags & FLAG_VM) == FLAG_VM { 4 } else { 0 };
            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };
            let stack_space = bytes_per_arg * (5 + error_code_space + vm86_space);
            let new_stack_pointer = ss_segment_descriptor.base()
                + if ss_segment_descriptor.is_32() {
                    new_esp - stack_space
                }
                else {
                    new_esp - stack_space & 0xFFFF
                };
            return_on_pagefault!(translate_address_system_write(new_stack_pointer));
            return_on_pagefault!(translate_address_system_write(
                ss_segment_descriptor.base() + new_esp - 1
            ));
            // no exceptions below
            *cpl = cs_segment_descriptor.dpl();
            cpl_changed();
            update_cs_size(cs_segment_descriptor.is_32());
            *flags &= !FLAG_VM & !FLAG_RF;
            if !switch_seg(SS, new_ss) {
                // XXX
                dbg_assert!(false);
            }
            set_stack_reg(new_esp);
            // XXX: #SS if stack would cross stack limit
            if old_flags & FLAG_VM != 0 {
                if !descriptor.is_32() {
                    dbg_assert!(false);
                }
                else {
                    push32(*sreg.offset(GS as isize) as i32).unwrap();
                    push32(*sreg.offset(FS as isize) as i32).unwrap();
                    push32(*sreg.offset(DS as isize) as i32).unwrap();
                    push32(*sreg.offset(ES as isize) as i32).unwrap();
                }
            }
            if descriptor.is_32() {
                push32(old_ss).unwrap();
                push32(old_esp).unwrap();
            }
            else {
                push16(old_ss).unwrap();
                push16(old_esp).unwrap();
            }
        }
        else if cs_segment_descriptor.is_dc() || cs_segment_descriptor.dpl() == *cpl {
            // intra privilege level interrupt
            //dbg_log!("Intra privilege interrupt gate=" + h(selector, 4) + ":" + h(offset >>> 0, 8) +
            //        " gate_type=" + gate_type + " 16bit=" + descriptor.is_32() +
            //        " cpl=" + *cpl + " dpl=" + segment_descriptor.dpl() + " conforming=" + +segment_descriptor.is_dc(), );
            //debug.dump_regs_short();
            if *flags & FLAG_VM != 0 {
                dbg_assert!(false, "check error code");
                trigger_gp(selector & !3);
                return;
            }
            let bytes_per_arg = if descriptor.is_32() { 4 } else { 2 };
            let error_code_space = if error_code.is_some() { 1 } else { 0 };
            let stack_space = bytes_per_arg * (3 + error_code_space);
            // XXX: with current cpl or with cpl 0?
            return_on_pagefault!(writable_or_pagefault(
                get_stack_pointer(-stack_space),
                stack_space
            ));
            // no exceptions below
        }
        else {
            panic!("Unimplemented: #GP handler");
        }
        // XXX: #SS if stack would cross stack limit
        if descriptor.is_32() {
            push32(old_flags).unwrap();
            push32(*sreg.offset(CS as isize) as i32).unwrap();
            push32(get_real_eip()).unwrap();
            if let Some(ec) = error_code {
                push32(ec).unwrap();
            }
        }
        else {
            push16(old_flags).unwrap();
            push16(*sreg.offset(CS as isize) as i32).unwrap();
            push16(get_real_eip()).unwrap();
            if let Some(ec) = error_code {
                push16(ec).unwrap();
            }
            offset &= 0xFFFF;
        }
        if old_flags & FLAG_VM != 0 {
            if !switch_seg(GS, 0) || !switch_seg(FS, 0) || !switch_seg(DS, 0) || !switch_seg(ES, 0)
            {
                // can't fail
                dbg_assert!(false);
            }
        }
        *sreg.offset(CS as isize) = (selector as u16) & !3 | *cpl as u16;
        dbg_assert!((*sreg.offset(CS as isize) & 3) == *cpl as u16);
        update_cs_size(cs_segment_descriptor.is_32());
        *segment_limits.offset(CS as isize) = cs_segment_descriptor.effective_limit();
        *segment_offsets.offset(CS as isize) = cs_segment_descriptor.base();
        *instruction_pointer = get_seg_cs() + offset;
        *flags &= !FLAG_NT & !FLAG_VM & !FLAG_RF & !FLAG_TRAP;
        if gate_type == InterruptDescriptor::INTERRUPT_GATE {
            // clear int flag for interrupt gates
            *flags &= !FLAG_INTERRUPT;
        }
        else {
            if *flags & FLAG_INTERRUPT != 0 && old_flags & FLAG_INTERRUPT == 0 {
                handle_irqs();
            }
        }
    }
    else {
        // call 4 byte cs:ip interrupt vector from ivt at cpu.memory 0
        let index = (interrupt_nr << 2) as u32;
        let new_ip = read16(index);
        let new_cs = read16(index + 2);
        dbg_assert!(
            index | 3 <= IVT_SIZE,
            "Unimplemented: #GP for interrupt number out of IVT bounds"
        );
        // XXX: #SS if stack would cross stack limit
        // push flags, cs:ip
        push16(get_eflags()).unwrap();
        push16(*sreg.offset(CS as isize) as i32).unwrap();
        push16(get_real_eip()).unwrap();
        *flags &= !FLAG_INTERRUPT & !FLAG_AC & !FLAG_TRAP;
        switch_cs_real_mode(new_cs);
        *instruction_pointer = get_seg_cs() + new_ip;
    }
}
pub unsafe fn far_jump(eip: i32, selector: i32, is_call: bool, is_osize_32: bool) {
    dbg_assert!(selector < 0x10000 && selector >= 0);
    //dbg_log("far " + ["jump", "call"][+is_call] + " eip=" + h(eip >>> 0, 8) + " cs=" + h(selector, 4), LOG_CPU);
    //CPU_LOG_VERBOSE && this.debug.dump_state("far " + ["jump", "call"][+is_call]);
    if !*protected_mode || vm86_mode() {
        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }
        switch_cs_real_mode(selector);
        *instruction_pointer = get_seg_cs() + eip;
        return;
    }
    let (info, cs_selector) = match return_on_pagefault!(lookup_segment_selector(selector)) {
        Ok((desc, sel)) => (desc, sel),
        Err(selector_unusable) => match selector_unusable {
            SelectorNullOrInvalid::IsNull => {
                dbg_log!("#gp null cs");
                trigger_gp(0);
                return;
            },
            SelectorNullOrInvalid::IsInvalid => {
                dbg_log!("#gp invalid cs: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            },
        },
    };
    if info.is_system() {
        dbg_assert!(is_call, "TODO: Jump");
        dbg_log!("system type cs: {:x}", selector);
        if info.system_type() == 0xC || info.system_type() == 4 {
            // call gate
            let is_16 = info.system_type() == 4;
            if info.dpl() < *cpl || info.dpl() < cs_selector.rpl() {
                dbg_log!("#gp cs gate dpl < cpl or dpl < rpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
            if !info.is_present() {
                dbg_log!("#NP for loading not-present in gate cs sel={:x}", selector);
                trigger_np(selector & !3);
                return;
            }
            let cs_selector = (info.raw >> 16) as i32;
            let (cs_info, _) = match return_on_pagefault!(lookup_segment_selector(cs_selector)) {
                Ok((desc, sel)) => (desc, sel),
                Err(selector_unusable) => match selector_unusable {
                    SelectorNullOrInvalid::IsNull => {
                        dbg_log!("#gp null cs");
                        trigger_gp(0);
                        return;
                    },
                    SelectorNullOrInvalid::IsInvalid => {
                        dbg_log!("#gp invalid cs: {:x}", selector);
                        trigger_gp(selector & !3);
                        return;
                    },
                },
            };
            if !cs_info.is_executable() {
                dbg_log!("#gp non-executable cs: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }
            if cs_info.dpl() > *cpl {
                dbg_log!("#gp dpl > cpl: {:x}", cs_selector);
                trigger_gp(cs_selector & !3);
                return;
            }
            if !cs_info.is_present() {
                dbg_log!("#NP for loading not-present in cs sel={:x}", cs_selector);
                trigger_np(cs_selector & !3);
                return;
            }
            if !cs_info.is_dc() && cs_info.dpl() < *cpl {
                dbg_log!(
                    "more privilege call gate is_16={} from={} to={}",
                    is_16,
                    *cpl,
                    cs_info.dpl()
                );
                let tss_stack_addr = return_on_pagefault!(get_tss_stack_addr(cs_info.dpl()));
                let new_esp;
                let new_ss;
                if *tss_size_32 {
                    new_esp = read32s(tss_stack_addr);
                    new_ss = read16(tss_stack_addr + 4);
                }
                else {
                    new_esp = read16(tss_stack_addr);
                    new_ss = read16(tss_stack_addr + 2);
                }
                let (ss_info, ss_selector) =
                    match return_on_pagefault!(lookup_segment_selector(new_ss)) {
                        Ok((desc, sel)) => (desc, sel),
                        Err(selector_unusable) => match selector_unusable {
                            SelectorNullOrInvalid::IsNull => {
                                panic!("null ss: {}", new_ss);
                            },
                            SelectorNullOrInvalid::IsInvalid => {
                                panic!("invalid ss: {}", new_ss);
                            },
                        },
                    };
                // Disabled: Incorrect handling of direction bit
                // See http://css.csail.mit.edu/6.858/2014/readings/i386/s06_03.htm
                //if(!((new_esp >>> 0) <= ss_info.effective_limit))
                //    debugger;
                //dbg_assert!((new_esp >>> 0) <= ss_info.effective_limit);
                dbg_assert!(!ss_info.is_system() && ss_info.is_writable());
                if ss_selector.rpl() != cs_info.dpl()
                // xxx: 0 in v86 mode
                {
                    panic!("#TS handler");
                }
                if ss_info.dpl() != cs_info.dpl() || !ss_info.is_writable() {
                    panic!("#TS handler");
                }
                if !ss_info.is_present() {
                    panic!("#SS handler");
                }
                let parameter_count = (info.raw >> 32 & 0x1F) as i32;
                let mut stack_space = if is_16 { 4 } else { 8 };
                if is_call {
                    stack_space +=
                        if is_16 { 4 + 2 * parameter_count } else { 8 + 4 * parameter_count };
                }
                if ss_info.is_32() {
                    return_on_pagefault!(writable_or_pagefault(
                        ss_info.base() + new_esp - stack_space,
                        stack_space
                    )); // , cs_info.dpl
                }
                else {
                    return_on_pagefault!(writable_or_pagefault(
                        ss_info.base() + (new_esp - stack_space & 0xFFFF),
                        stack_space
                    )); // , cs_info.dpl
                }
                let old_esp = read_reg32(ESP);
                let old_ss = *sreg.offset(SS as isize);
                let old_stack_pointer = get_stack_pointer(0);
                //dbg_log!("old_esp=" + h(old_esp));
                *cpl = cs_info.dpl();
                cpl_changed();
                update_cs_size(cs_info.is_32());
                // XXX: Should be checked before side effects
                if !switch_seg(SS, new_ss) {
                    dbg_assert!(false)
                };
                set_stack_reg(new_esp);
                //dbg_log!("parameter_count=" + parameter_count);
                //dbg_assert!(parameter_count == 0, "TODO");
                if is_16 {
                    push16(old_ss as i32).unwrap();
                    push16(old_esp).unwrap();
                }
                else {
                    push32(old_ss as i32).unwrap();
                    push32(old_esp).unwrap();
                }
                if is_call {
                    if is_16 {
                        for i in (0..parameter_count).rev() {
                            //for(let i = parameter_count - 1; i >= 0; i--)
                            let parameter = safe_read16(old_stack_pointer + 2 * i).unwrap();
                            push16(parameter).unwrap();
                        }
                        //writable_or_pagefault(get_stack_pointer(-4), 4);
                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        for i in (0..parameter_count).rev() {
                            //for(let i = parameter_count - 1; i >= 0; i--)
                            let parameter = safe_read32s(old_stack_pointer + 4 * i).unwrap();
                            push32(parameter).unwrap();
                        }
                        //writable_or_pagefault(get_stack_pointer(-8), 8);
                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }
            }
            else {
                dbg_log!(
                    "same privilege call gate is_16={} from={} to={} conforming={}",
                    is_16,
                    *cpl,
                    cs_info.dpl(),
                    cs_info.is_dc()
                );
                // ok
                if is_call {
                    if is_16 {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                        push16(*sreg.offset(CS as isize) as i32).unwrap();
                        push16(get_real_eip()).unwrap();
                    }
                    else {
                        return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                        push32(*sreg.offset(CS as isize) as i32).unwrap();
                        push32(get_real_eip()).unwrap();
                    }
                }
            }
            // Note: eip from call is ignored
            let mut new_eip = (info.raw & 0xFFFF) as i32;
            if !is_16 {
                new_eip |= ((info.raw >> 32) & 0xFFFF0000) as i32;
            }
            dbg_log!(
                "call gate eip={:x} cs={:x} conforming={}",
                new_eip as u32,
                cs_selector,
                cs_info.is_dc()
            );
            dbg_assert!((new_eip as u32) <= cs_info.effective_limit(), "todo: #gp");
            update_cs_size(cs_info.is_32());
            *segment_is_null.offset(CS as isize) = false;
            *segment_limits.offset(CS as isize) = cs_info.effective_limit();
            *segment_offsets.offset(CS as isize) = cs_info.base();
            *sreg.offset(CS as isize) = cs_selector as u16 & !3 | *cpl as u16;
            dbg_assert!(*sreg.offset(CS as isize) & 3 == *cpl as u16);
            *instruction_pointer = get_seg_cs() + new_eip;
        }
        else {
            dbg_assert!(false);
            //let types = { 9: "Available 386 TSS", 0xb: "Busy 386 TSS", 4: "286 Call Gate", 0xc: "386 Call Gate" };
            //throw debug.unimpl("load system segment descriptor, type = " + (info.access & 15) + " (" + types[info.access & 15] + ")");
        }
    }
    else {
        if !info.is_executable() {
            dbg_log!("#gp non-executable cs: {:x}", selector);
            trigger_gp(selector & !3);
            return;
        }
        if info.is_dc() {
            // conforming code segment
            if info.dpl() > *cpl {
                dbg_log!("#gp cs dpl > cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }
        else {
            // non-conforming code segment
            if cs_selector.rpl() > *cpl || info.dpl() != *cpl {
                dbg_log!("#gp cs rpl > cpl or dpl != cpl: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            }
        }
        if !info.is_present() {
            dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
            dbg_trace();
            trigger_np(selector & !3);
            return;
        }
        if is_call {
            if is_osize_32 {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-8), 8));
                push32(*sreg.offset(CS as isize) as i32).unwrap();
                push32(get_real_eip()).unwrap();
            }
            else {
                return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-4), 4));
                push16(*sreg.offset(CS as isize) as i32).unwrap();
                push16(get_real_eip()).unwrap();
            }
        }
        dbg_assert!((eip as u32) <= info.effective_limit(), "todo: #gp");
        update_cs_size(info.is_32());
        *segment_is_null.offset(CS as isize) = false;
        *segment_limits.offset(CS as isize) = info.effective_limit();
        *segment_offsets.offset(CS as isize) = info.base();
        *sreg.offset(CS as isize) = selector as u16 & !3 | *cpl as u16;
        *instruction_pointer = get_seg_cs() + eip;
    }
    //dbg_log!("far " + ["jump", "call"][+is_call] + " to:", LOG_CPU)
    //CPU_LOG_VERBOSE && debug.dump_state("far " + ["jump", "call"][+is_call] + " end");
}
pub unsafe fn far_return(eip: i32, selector: i32, stack_adjust: i32, is_osize_32: bool) {
    dbg_assert!(selector < 0x10000 && selector >= 0);
    //dbg_log("far return eip=" + h(eip >>> 0, 8) + " cs=" + h(selector, 4) + " stack_adjust=" + h(stack_adjust), LOG_CPU);
    //CPU_LOG_VERBOSE && this.debug.dump_state("far ret start");
    if !*protected_mode {
        dbg_assert!(!*is_32);
        //dbg_assert(!this.stack_size_32[0]);
    }
    if !*protected_mode || vm86_mode() {
        switch_cs_real_mode(selector);
        *instruction_pointer = get_seg_cs() + eip;
        adjust_stack_reg(2 * (if is_osize_32 { 4 } else { 2 }) + stack_adjust);
        return;
    }
    let (info, cs_selector) = match return_on_pagefault!(lookup_segment_selector(selector)) {
        Ok((desc, sel)) => (desc, sel),
        Err(selector_unusable) => match selector_unusable {
            SelectorNullOrInvalid::IsNull => {
                dbg_log!("far return: #gp null cs");
                trigger_gp(0);
                return;
            },
            SelectorNullOrInvalid::IsInvalid => {
                dbg_log!("far return: #gp invalid cs: {:x}", selector);
                trigger_gp(selector & !3);
                return;
            },
        },
    };
    if info.is_system() {
        dbg_assert!(false, "is system in far return");
        trigger_gp(selector & !3);
        return;
    }
    if !info.is_executable() {
        dbg_log!("non-executable cs: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }
    if cs_selector.rpl() < *cpl {
        dbg_log!("cs rpl < cpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }
    if info.is_dc() && info.dpl() > cs_selector.rpl() {
        dbg_log!("cs conforming and dpl > rpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }
    if !info.is_dc() && info.dpl() != cs_selector.rpl() {
        dbg_log!("cs non-conforming and dpl != rpl: {:x}", selector);
        trigger_gp(selector & !3);
        return;
    }
    if !info.is_present() {
        dbg_log!("#NP for loading not-present in cs sel={:x}", selector);
        dbg_trace();
        trigger_np(selector & !3);
        return;
    }
    if cs_selector.rpl() > *cpl {
        dbg_log!(
            "far return privilege change cs: {:x} from={} to={} is_16={}",
            selector,
            *cpl,
            cs_selector.rpl(),
            is_osize_32
        );
        let temp_esp;
        let temp_ss;
        if is_osize_32 {
            //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 8))))
            temp_esp = safe_read32s(get_stack_pointer(stack_adjust + 8)).unwrap();
            //dbg_log!("esp=" + h(temp_esp));
            temp_ss = safe_read16(get_stack_pointer(stack_adjust + 12)).unwrap();
        }
        else {
            //dbg_log!("esp read from " + h(translate_address_system_read(get_stack_pointer(stack_adjust + 4))));
            temp_esp = safe_read16(get_stack_pointer(stack_adjust + 4)).unwrap();
            //dbg_log!("esp=" + h(temp_esp));
            temp_ss = safe_read16(get_stack_pointer(stack_adjust + 6)).unwrap();
        }
        *cpl = cs_selector.rpl();
        cpl_changed();
        // XXX: This failure should be checked before side effects
        if !switch_seg(SS, temp_ss) {
            dbg_assert!(false);
        }
        set_stack_reg(temp_esp + stack_adjust);
        //if(is_osize_32)
        //{
        //    adjust_stack_reg(2 * 4);
        //}
        //else
        //{
        //    adjust_stack_reg(2 * 2);
        //}
        //throw debug.unimpl("privilege change");
        //adjust_stack_reg(stack_adjust);
    }
    else {
        if is_osize_32 {
            adjust_stack_reg(2 * 4 + stack_adjust);
        }
        else {
            adjust_stack_reg(2 * 2 + stack_adjust);
        }
    }
    //dbg_assert(*cpl === info.dpl);
    update_cs_size(info.is_32());
    *segment_is_null.offset(CS as isize) = false;
    *segment_limits.offset(CS as isize) = info.effective_limit();
    *segment_offsets.offset(CS as isize) = info.base();
    *sreg.offset(CS as isize) = selector as u16;
    dbg_assert!(selector & 3 == *cpl as i32);
    *instruction_pointer = get_seg_cs() + eip;
    //dbg_log("far return to:", LOG_CPU)
    //CPU_LOG_VERBOSE && debug.dump_state("far ret end");
}
  1263. pub unsafe fn after_block_boundary() { jit_block_boundary = true; }
  1264. #[no_mangle]
  1265. pub fn track_jit_exit(phys_addr: u32) {
  1266. unsafe {
  1267. debug_last_jump = LastJump::Compiled { phys_addr };
  1268. }
  1269. }
  1270. #[no_mangle]
  1271. pub unsafe fn get_eflags() -> i32 {
  1272. return *flags & !FLAGS_ALL
  1273. | getcf() as i32
  1274. | (getpf() as i32) << 2
  1275. | (getaf() as i32) << 4
  1276. | (getzf() as i32) << 6
  1277. | (getsf() as i32) << 7
  1278. | (getof() as i32) << 11;
  1279. }
  1280. #[no_mangle]
  1281. pub unsafe fn get_eflags_no_arith() -> i32 { return *flags; }
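// Note on the TLB layout used by the translate_address_* functions below: tlb_data[page] stores
// the physical page address xor the (page-aligned) virtual address in the upper bits and TLB_*
// flag bits in the low 12 bits, so `entry & !0xFFF ^ address` recovers the physical address
// (see do_page_walk).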
  1282. pub unsafe fn translate_address_read(address: i32) -> OrPageFault<u32> {
  1283. let entry = tlb_data[(address as u32 >> 12) as usize];
  1284. let user = *cpl == 3;
  1285. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 }) == TLB_VALID {
  1286. Ok((entry & !0xFFF ^ address) as u32)
  1287. }
  1288. else {
  1289. Ok((do_page_translation(address, false, user)? | address & 0xFFF) as u32)
  1290. }
  1291. }
  1292. pub unsafe fn translate_address_read_jit(address: i32) -> OrPageFault<u32> {
  1293. let entry = tlb_data[(address as u32 >> 12) as usize];
  1294. let user = *cpl == 3;
  1295. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 }) == TLB_VALID {
  1296. Ok((entry & !0xFFF ^ address) as u32)
  1297. }
  1298. else {
  1299. match do_page_walk(address, false, user) {
  1300. Ok(phys_addr_high) => Ok((phys_addr_high | address & 0xFFF) as u32),
  1301. Err(pagefault) => {
  1302. trigger_pagefault_jit(pagefault);
  1303. Err(())
  1304. },
  1305. }
  1306. }
  1307. }
  1308. pub struct PageFault {
  1309. addr: i32,
  1310. for_writing: bool,
  1311. user: bool,
  1312. present: bool,
  1313. }
  1314. pub unsafe fn do_page_translation(addr: i32, for_writing: bool, user: bool) -> OrPageFault<i32> {
  1315. match do_page_walk(addr, for_writing, user) {
  1316. Ok(phys_addr) => Ok(phys_addr),
  1317. Err(pagefault) => {
  1318. trigger_pagefault(pagefault);
  1319. Err(())
  1320. },
  1321. }
  1322. }
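// Walks the page tables for addr (identity mapping when paging is disabled; 4K pages and, with
// CR4.PSE, 4M pages otherwise), updates the accessed/dirty bits, inserts a TLB entry and returns
// the physical page address, or a PageFault describing why the access is not allowed.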
  1323. pub unsafe fn do_page_walk(addr: i32, for_writing: bool, user: bool) -> Result<i32, PageFault> {
  1324. let mut can_write: bool = true;
  1325. let global;
  1326. let mut allow_user: bool = true;
  1327. let page = (addr as u32 >> 12) as i32;
  1328. let high;
  1329. if *cr & CR0_PG == 0 {
  1330. // paging disabled
  1331. high = (addr as u32 & 0xFFFFF000) as i32;
  1332. global = false
  1333. }
  1334. else {
  1335. let page_dir_addr = (*cr.offset(3) as u32 >> 2).wrapping_add((page >> 10) as u32) as i32;
  1336. let page_dir_entry = read_aligned32(page_dir_addr as u32);
  1337. // XXX
  1338. let kernel_write_override = !user && 0 == *cr & CR0_WP;
  1339. if 0 == page_dir_entry & PAGE_TABLE_PRESENT_MASK {
1340. // To do at this point:
1341. //
1342. // - set cr2 = addr (the address that caused the page fault)
1343. // - call call_interrupt_vector with id 14 and an error code of 0-7 (requires knowing whether the access was a read or a write)
1344. // - prevent execution of the function that triggered this call
  1345. return Err(PageFault {
  1346. addr,
  1347. for_writing,
  1348. user,
  1349. present: false,
  1350. });
  1351. }
  1352. if page_dir_entry & PAGE_TABLE_RW_MASK == 0 && !kernel_write_override {
  1353. can_write = false;
  1354. if for_writing {
  1355. return Err(PageFault {
  1356. addr,
  1357. for_writing,
  1358. user,
  1359. present: true,
  1360. });
  1361. }
  1362. }
  1363. if page_dir_entry & PAGE_TABLE_USER_MASK == 0 {
  1364. allow_user = false;
  1365. if user {
  1366. // Page Fault: page table accessed by non-supervisor
  1367. return Err(PageFault {
  1368. addr,
  1369. for_writing,
  1370. user,
  1371. present: true,
  1372. });
  1373. }
  1374. }
  1375. if 0 != page_dir_entry & PAGE_TABLE_PSE_MASK && 0 != *cr.offset(4) & CR4_PSE {
  1376. // size bit is set
  1377. // set the accessed and dirty bits
  1378. let new_page_dir_entry = page_dir_entry
  1379. | PAGE_TABLE_ACCESSED_MASK
  1380. | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };
  1381. if page_dir_entry != new_page_dir_entry {
  1382. write_aligned32(page_dir_addr as u32, new_page_dir_entry);
  1383. }
  1384. high = (page_dir_entry as u32 & 0xFFC00000 | (addr & 0x3FF000) as u32) as i32;
  1385. global = page_dir_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
  1386. }
  1387. else {
  1388. let page_table_addr = ((page_dir_entry as u32 & 0xFFFFF000) >> 2)
  1389. .wrapping_add((page & 1023) as u32) as i32;
  1390. let page_table_entry = read_aligned32(page_table_addr as u32);
  1391. if page_table_entry & PAGE_TABLE_PRESENT_MASK == 0 {
  1392. return Err(PageFault {
  1393. addr,
  1394. for_writing,
  1395. user,
  1396. present: false,
  1397. });
  1398. }
  1399. if page_table_entry & PAGE_TABLE_RW_MASK == 0 && !kernel_write_override {
  1400. can_write = false;
  1401. if for_writing {
  1402. return Err(PageFault {
  1403. addr,
  1404. for_writing,
  1405. user,
  1406. present: true,
  1407. });
  1408. }
  1409. }
  1410. if page_table_entry & PAGE_TABLE_USER_MASK == 0 {
  1411. allow_user = false;
  1412. if user {
  1413. return Err(PageFault {
  1414. addr,
  1415. for_writing,
  1416. user,
  1417. present: true,
  1418. });
  1419. }
  1420. }
  1421. // Set the accessed and dirty bits
  1422. // Note: dirty bit is only set on the page table entry
  1423. let new_page_dir_entry = page_dir_entry | PAGE_TABLE_ACCESSED_MASK;
  1424. if new_page_dir_entry != page_dir_entry {
  1425. write_aligned32(page_dir_addr as u32, new_page_dir_entry);
  1426. }
  1427. let new_page_table_entry = page_table_entry
  1428. | PAGE_TABLE_ACCESSED_MASK
  1429. | if for_writing { PAGE_TABLE_DIRTY_MASK } else { 0 };
  1430. if page_table_entry != new_page_table_entry {
  1431. write_aligned32(page_table_addr as u32, new_page_table_entry);
  1432. }
  1433. high = (page_table_entry as u32 & 0xFFFFF000) as i32;
  1434. global = page_table_entry & PAGE_TABLE_GLOBAL_MASK == PAGE_TABLE_GLOBAL_MASK
  1435. }
  1436. }
  1437. if tlb_data[page as usize] == 0 {
  1438. if valid_tlb_entries_count == VALID_TLB_ENTRY_MAX {
  1439. profiler::stat_increment(TLB_FULL);
  1440. clear_tlb();
  1441. // also clear global entries if tlb is almost full after clearing non-global pages
  1442. if valid_tlb_entries_count > VALID_TLB_ENTRY_MAX * 3 / 4 {
  1443. profiler::stat_increment(TLB_GLOBAL_FULL);
  1444. full_clear_tlb();
  1445. }
  1446. }
  1447. dbg_assert!(valid_tlb_entries_count < VALID_TLB_ENTRY_MAX);
  1448. valid_tlb_entries[valid_tlb_entries_count as usize] = page;
  1449. valid_tlb_entries_count += 1;
  1450. // TODO: Check that there are no duplicates in valid_tlb_entries
  1451. // XXX: There will probably be duplicates due to invlpg deleting
  1452. // entries from tlb_data but not from valid_tlb_entries
  1453. }
  1454. else if CHECK_TLB_INVARIANTS {
  1455. let mut found: bool = false;
  1456. for i in 0..valid_tlb_entries_count {
  1457. if valid_tlb_entries[i as usize] == page {
  1458. found = true;
  1459. break;
  1460. }
  1461. }
  1462. dbg_assert!(found);
  1463. }
  1464. let is_in_mapped_range = in_mapped_range(high as u32);
  1465. let has_code = !is_in_mapped_range && jit::jit_page_has_code(Page::page_of(high as u32));
  1466. let info_bits = TLB_VALID
  1467. | if can_write { 0 } else { TLB_READONLY }
  1468. | if allow_user { 0 } else { TLB_NO_USER }
  1469. | if is_in_mapped_range { TLB_IN_MAPPED_RANGE } else { 0 }
  1470. | if global && 0 != *cr.offset(4) & CR4_PGE { TLB_GLOBAL } else { 0 }
  1471. | if has_code { TLB_HAS_CODE } else { 0 };
  1472. dbg_assert!((high ^ page << 12) & 0xFFF == 0);
  1473. tlb_data[page as usize] = high ^ page << 12 | info_bits;
  1474. return Ok(high);
  1475. }
  1476. #[no_mangle]
  1477. pub unsafe fn full_clear_tlb() {
  1478. profiler::stat_increment(FULL_CLEAR_TLB);
  1479. // clear tlb including global pages
  1480. *last_virt_eip = -1;
  1481. for i in 0..valid_tlb_entries_count {
  1482. let page = valid_tlb_entries[i as usize];
  1483. tlb_data[page as usize] = 0;
  1484. }
  1485. valid_tlb_entries_count = 0;
  1486. if CHECK_TLB_INVARIANTS {
  1487. for i in 0..0x100000 {
  1488. dbg_assert!(tlb_data[i] == 0);
  1489. }
  1490. };
  1491. }
  1492. #[no_mangle]
  1493. pub unsafe fn clear_tlb() {
  1494. profiler::stat_increment(CLEAR_TLB);
  1495. // clear tlb excluding global pages
  1496. *last_virt_eip = -1;
  1497. let mut global_page_offset: i32 = 0;
  1498. for i in 0..valid_tlb_entries_count {
  1499. let page = valid_tlb_entries[i as usize];
  1500. let entry = tlb_data[page as usize];
  1501. if 0 != entry & TLB_GLOBAL {
  1502. // reinsert at the front
  1503. valid_tlb_entries[global_page_offset as usize] = page;
  1504. global_page_offset += 1;
  1505. }
  1506. else {
  1507. tlb_data[page as usize] = 0
  1508. }
  1509. }
  1510. valid_tlb_entries_count = global_page_offset;
  1511. if CHECK_TLB_INVARIANTS {
  1512. for i in 0..0x100000 {
  1513. dbg_assert!(tlb_data[i] == 0 || 0 != tlb_data[i] & TLB_GLOBAL);
  1514. }
  1515. };
  1516. }
  1517. /// Pagefault handling with the jit works as follows:
  1518. /// - If the slow path is taken, it calls safe_{read,write}*_jit
  1519. /// - safe_{read,write}*_jit call translate_address_{read,write}_jit
  1520. /// - translate_address_{read,write}_jit do the normal page walk and call this method instead of
  1521. /// trigger_pagefault when a page fault happens
1522. /// - this method prepares a page fault by setting cr2, eip and prefixes, and writes the error code
  1523. /// into page_fault_error_code. This method *doesn't* trigger the interrupt, as registers are
  1524. /// still stored in the wasm module
  1525. /// - back in the wasm module, the generated code detects the page fault, restores the registers
  1526. /// and finally calls trigger_pagefault_end_jit, which does the interrupt
  1527. pub unsafe fn trigger_pagefault_jit(fault: PageFault) {
  1528. let write = fault.for_writing;
  1529. let addr = fault.addr;
  1530. let present = fault.present;
  1531. let user = fault.user;
  1532. if ::config::LOG_PAGE_FAULTS {
  1533. dbg_log!(
  1534. "page fault jit w={} u={} p={} eip={:x} cr2={:x}",
  1535. write as i32,
  1536. user as i32,
  1537. present as i32,
  1538. *previous_ip,
  1539. addr
  1540. );
  1541. dbg_trace();
  1542. }
  1543. if DEBUG {
  1544. if must_not_fault {
  1545. dbg_log!("Unexpected page fault");
  1546. dbg_trace();
  1547. dbg_assert!(false);
  1548. }
  1549. }
  1550. profiler::stat_increment(PAGE_FAULT);
  1551. *cr.offset(2) = addr;
  1552. // invalidate tlb entry
  1553. let page = ((addr as u32) >> 12) as i32;
  1554. tlb_data[page as usize] = 0;
  1555. if DEBUG {
  1556. if cpu_exception_hook(CPU_EXCEPTION_PF) {
  1557. return;
  1558. }
  1559. }
  1560. *page_fault_error_code = (user as i32) << 2 | (write as i32) << 1 | present as i32;
  1561. }
  1562. #[no_mangle]
  1563. pub unsafe fn trigger_pagefault_end_jit() {
  1564. *instruction_pointer = *previous_ip;
  1565. call_interrupt_vector(CPU_EXCEPTION_PF, false, Some(*page_fault_error_code));
  1566. }
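// Non-jit variant of trigger_pagefault_jit above: sets cr2, invalidates the TLB entry and raises
// the #PF interrupt immediately instead of deferring it.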
  1567. pub unsafe fn trigger_pagefault(fault: PageFault) {
  1568. let write = fault.for_writing;
  1569. let addr = fault.addr;
  1570. let present = fault.present;
  1571. let user = fault.user;
  1572. if ::config::LOG_PAGE_FAULTS {
  1573. dbg_log!(
  1574. "page fault w={} u={} p={} eip={:x} cr2={:x}",
  1575. write as i32,
  1576. user as i32,
  1577. present as i32,
  1578. *previous_ip,
  1579. addr
  1580. );
  1581. dbg_trace();
  1582. }
  1583. if DEBUG {
  1584. if must_not_fault {
  1585. dbg_log!("Unexpected page fault");
  1586. dbg_trace();
  1587. dbg_assert!(false);
  1588. }
  1589. }
  1590. profiler::stat_increment(PAGE_FAULT);
  1591. *cr.offset(2) = addr;
  1592. // invalidate tlb entry
  1593. let page = ((addr as u32) >> 12) as i32;
  1594. tlb_data[page as usize] = 0;
  1595. *instruction_pointer = *previous_ip;
  1596. call_interrupt_vector(
  1597. CPU_EXCEPTION_PF,
  1598. false,
  1599. Some((user as i32) << 2 | (write as i32) << 1 | present as i32),
  1600. );
  1601. }
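// Like translate_address_write, but also returns whether the jit's dirty-page bookkeeping can be
// skipped for this write (true when the TLB entry indicates the page contains no compiled code).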
  1602. pub unsafe fn translate_address_write_and_can_skip_dirty(address: i32) -> OrPageFault<(u32, bool)> {
  1603. let entry = tlb_data[(address as u32 >> 12) as usize];
  1604. let user = *cpl == 3;
  1605. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1606. return Ok(((entry & !0xFFF ^ address) as u32, entry & TLB_HAS_CODE == 0));
  1607. }
  1608. else {
  1609. return Ok((
  1610. (do_page_translation(address, true, user)? | address & 0xFFF) as u32,
  1611. false,
  1612. ));
  1613. };
  1614. }
  1615. pub unsafe fn translate_address_write(address: i32) -> OrPageFault<u32> {
  1616. let entry = tlb_data[(address as u32 >> 12) as usize];
  1617. let user = *cpl == 3;
  1618. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1619. return Ok((entry & !0xFFF ^ address) as u32);
  1620. }
  1621. else {
  1622. return Ok((do_page_translation(address, true, user)? | address & 0xFFF) as u32);
  1623. };
  1624. }
  1625. pub unsafe fn translate_address_write_jit(address: i32) -> OrPageFault<u32> {
  1626. let entry = tlb_data[(address as u32 >> 12) as usize];
  1627. let user = *cpl == 3;
  1628. if entry & (TLB_VALID | if user { TLB_NO_USER } else { 0 } | TLB_READONLY) == TLB_VALID {
  1629. Ok((entry & !0xFFF ^ address) as u32)
  1630. }
  1631. else {
  1632. match do_page_walk(address, true, user) {
  1633. Ok(phys_addr_high) => Ok((phys_addr_high | address & 0xFFF) as u32),
  1634. Err(pagefault) => {
  1635. trigger_pagefault_jit(pagefault);
  1636. Err(())
  1637. },
  1638. }
  1639. }
  1640. }
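// Sets or clears TLB_HAS_CODE on every TLB entry that maps the given physical page (a physical
// page may be reachable through several virtual pages).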
  1641. #[no_mangle]
  1642. pub fn tlb_set_has_code(physical_page: Page, has_code: bool) {
  1643. let physical_page = physical_page.to_u32();
  1644. for i in 0..unsafe { valid_tlb_entries_count } {
  1645. let page = unsafe { valid_tlb_entries[i as usize] };
  1646. let entry = unsafe { tlb_data[page as usize] };
  1647. if 0 != entry {
  1648. let tlb_physical_page = entry as u32 >> 12 ^ page as u32;
  1649. if physical_page == tlb_physical_page {
  1650. unsafe {
  1651. tlb_data[page as usize] =
  1652. if has_code { entry | TLB_HAS_CODE } else { entry & !TLB_HAS_CODE }
  1653. }
  1654. }
  1655. }
  1656. }
  1657. check_tlb_invariants();
  1658. }
  1659. #[no_mangle]
  1660. pub fn check_tlb_invariants() {
  1661. if !CHECK_TLB_INVARIANTS {
  1662. return;
  1663. }
  1664. for i in 0..unsafe { valid_tlb_entries_count } {
  1665. let page = unsafe { valid_tlb_entries[i as usize] };
  1666. let entry = unsafe { tlb_data[page as usize] };
  1667. if 0 == entry || 0 != entry & TLB_IN_MAPPED_RANGE {
  1668. // there's no code in mapped memory
  1669. continue;
  1670. }
  1671. let target = (entry ^ page << 12) as u32;
  1672. dbg_assert!(!in_mapped_range(target));
  1673. let entry_has_code = entry & TLB_HAS_CODE != 0;
  1674. let has_code = jit::jit_page_has_code(Page::page_of(target));
  1675. // If some code has been created in a page, the corresponding tlb entries must be marked
  1676. dbg_assert!(!has_code || entry_has_code);
  1677. }
  1678. }
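// readable_or_pagefault/writable_or_pagefault: ensure that the up to two pages touched by an
// access of `size` bytes (size < 0x1000) are mapped with the required permissions, performing a
// page walk, and possibly raising #PF, for any page not already in the TLB with those
// permissions. No-ops when paging is disabled.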
  1679. pub unsafe fn readable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
  1680. dbg_assert!(size < 0x1000);
  1681. dbg_assert!(size > 0);
  1682. if *cr & CR0_PG == 0 {
  1683. return Ok(());
  1684. }
  1685. let user = *cpl == 3;
  1686. let mask = TLB_VALID | if user { TLB_NO_USER } else { 0 };
  1687. let expect = TLB_VALID;
  1688. let page = (addr as u32 >> 12) as i32;
  1689. if tlb_data[page as usize] & mask != expect {
  1690. do_page_translation(addr, false, user)?;
  1691. }
  1692. let next_page = ((addr + size - 1) as u32 >> 12) as i32;
  1693. if page != next_page {
  1694. dbg_assert!(next_page == page + 1);
  1695. if tlb_data[next_page as usize] & mask != expect {
  1696. do_page_translation(next_page << 12, false, user)?;
  1697. }
  1698. }
  1699. return Ok(());
  1700. }
  1701. pub unsafe fn writable_or_pagefault(addr: i32, size: i32) -> OrPageFault<()> {
  1702. dbg_assert!(size < 0x1000);
  1703. dbg_assert!(size > 0);
  1704. if *cr & CR0_PG == 0 {
  1705. return Ok(());
  1706. }
  1707. let user = *cpl == 3;
  1708. let mask = TLB_READONLY | TLB_VALID | if user { TLB_NO_USER } else { 0 };
  1709. let expect = TLB_VALID;
  1710. let page = (addr as u32 >> 12) as i32;
  1711. if tlb_data[page as usize] & mask != expect {
  1712. do_page_translation(addr, true, user)?;
  1713. }
  1714. let next_page = ((addr + size - 1) as u32 >> 12) as i32;
  1715. if page != next_page {
  1716. dbg_assert!(next_page == page + 1);
  1717. if tlb_data[next_page as usize] & mask != expect {
  1718. do_page_translation(next_page << 12, true, user)?;
  1719. }
  1720. }
  1721. return Ok(());
  1722. }
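// Instruction fetch helpers: *eip_phys caches `physical ^ virtual` for the current code page and
// *last_virt_eip the page-aligned virtual eip, so fetches within the same page avoid a TLB lookup.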
  1723. pub unsafe fn read_imm8() -> OrPageFault<i32> {
  1724. let eip = *instruction_pointer;
  1725. if 0 != eip & !0xFFF ^ *last_virt_eip {
  1726. *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
  1727. *last_virt_eip = eip & !0xFFF
  1728. }
  1729. dbg_assert!(!in_mapped_range((*eip_phys ^ eip) as u32));
  1730. let data8 = *mem8.offset((*eip_phys ^ eip) as isize) as i32;
  1731. *instruction_pointer = eip + 1;
  1732. return Ok(data8);
  1733. }
  1734. pub unsafe fn read_imm8s() -> OrPageFault<i32> { return Ok(read_imm8()? << 24 >> 24); }
  1735. pub unsafe fn read_imm16() -> OrPageFault<i32> {
  1736. // Two checks in one comparison:
  1737. // 1. Did the high 20 bits of eip change
  1738. // or 2. Are the low 12 bits of eip 0xFFF (and this read crosses a page boundary)
  1739. if (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFE {
  1740. return Ok(read_imm8()? | read_imm8()? << 8);
  1741. }
  1742. else {
  1743. let data16 = read16((*eip_phys ^ *instruction_pointer) as u32);
  1744. *instruction_pointer = *instruction_pointer + 2;
  1745. return Ok(data16);
  1746. };
  1747. }
  1748. pub unsafe fn read_imm32s() -> OrPageFault<i32> {
1749. // Analogous to the comment in read_imm16 above
  1750. if (*instruction_pointer ^ *last_virt_eip) as u32 > 0xFFC {
  1751. return Ok(read_imm16()? | read_imm16()? << 16);
  1752. }
  1753. else {
  1754. let data32 = read32s((*eip_phys ^ *instruction_pointer) as u32);
  1755. *instruction_pointer = *instruction_pointer + 4;
  1756. return Ok(data32);
  1757. };
  1758. }
  1759. pub unsafe fn is_osize_32() -> bool {
  1760. dbg_assert!(!in_jit);
  1761. return *is_32 != (*prefixes as i32 & PREFIX_MASK_OPSIZE == PREFIX_MASK_OPSIZE);
  1762. }
  1763. pub unsafe fn is_asize_32() -> bool {
  1764. dbg_assert!(!in_jit);
  1765. return *is_32 != (*prefixes as i32 & PREFIX_MASK_ADDRSIZE == PREFIX_MASK_ADDRSIZE);
  1766. }
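// Resolves a selector against the GDT or LDT and returns the raw descriptor, or IsNull/IsInvalid
// for the cases that callers turn into #GP.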
  1767. pub unsafe fn lookup_segment_selector(
  1768. selector: i32,
  1769. ) -> OrPageFault<Result<(SegmentDescriptor, SegmentSelector), SelectorNullOrInvalid>> {
  1770. let selector = SegmentSelector::of_u16(selector as u16);
  1771. if selector.is_null() {
  1772. return Ok(Err(SelectorNullOrInvalid::IsNull));
  1773. }
  1774. let (table_offset, table_limit) = if selector.is_gdt() {
  1775. (*gdtr_offset as u32, *gdtr_size as u16)
  1776. }
  1777. else {
  1778. (
  1779. *segment_offsets.offset(LDTR as isize) as u32,
  1780. *segment_limits.offset(LDTR as isize) as u16,
  1781. )
  1782. };
  1783. if selector.descriptor_offset() > table_limit {
  1784. return Ok(Err(SelectorNullOrInvalid::IsInvalid));
  1785. }
  1786. let descriptor_address =
  1787. translate_address_system_read(selector.descriptor_offset() as i32 + table_offset as i32)?;
  1788. let descriptor = SegmentDescriptor::of_u64(read64s(descriptor_address) as u64);
  1789. Ok(Ok((descriptor, selector)))
  1790. }
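// Loads a segment register other than cs. In real/vm86 mode the base is simply selector << 4; in
// protected mode the descriptor checks below apply (SS has stricter rules and raises #SS when the
// descriptor is not present). Returns false if an exception was triggered.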
  1791. pub unsafe fn switch_seg(reg: i32, selector_raw: i32) -> bool {
  1792. dbg_assert!(reg >= 0 && reg <= 5);
  1793. dbg_assert!(selector_raw >= 0 && selector_raw < 0x10000);
  1794. if !*protected_mode || vm86_mode() {
  1795. *sreg.offset(reg as isize) = selector_raw as u16;
  1796. *segment_is_null.offset(reg as isize) = false;
  1797. *segment_offsets.offset(reg as isize) = selector_raw << 4;
  1798. if reg == SS {
  1799. *stack_size_32 = false;
  1800. }
  1801. return true;
  1802. }
  1803. let (descriptor, selector) =
  1804. match return_on_pagefault!(lookup_segment_selector(selector_raw), false) {
  1805. Ok((desc, sel)) => (desc, sel),
  1806. Err(selector_unusable) => {
  1807. // The selector couldn't be used to fetch a descriptor, so we handle all of those
  1808. // cases
  1809. if selector_unusable == SelectorNullOrInvalid::IsNull {
  1810. if reg == SS {
  1811. dbg_log!("#GP for loading 0 in SS sel={:x}", selector_raw);
  1812. trigger_gp(0);
  1813. return false;
  1814. }
  1815. else if reg != CS {
  1816. // es, ds, fs, gs
  1817. *sreg.offset(reg as isize) = selector_raw as u16;
  1818. *segment_is_null.offset(reg as isize) = true;
  1819. return true;
  1820. }
  1821. }
  1822. else if selector_unusable == SelectorNullOrInvalid::IsInvalid {
  1823. dbg_log!(
  1824. "#GP for loading invalid in seg={} sel={:x}",
  1825. reg,
  1826. selector_raw
  1827. );
  1828. trigger_gp(selector_raw & !3);
  1829. return false;
  1830. }
  1831. dbg_assert!(false);
  1832. return false;
  1833. },
  1834. };
  1835. if reg == SS {
  1836. if descriptor.is_system()
  1837. || selector.rpl() != *cpl
  1838. || !descriptor.is_writable()
  1839. || descriptor.dpl() != *cpl
  1840. {
  1841. dbg_log!("#GP for loading invalid in SS sel={:x}", selector_raw);
  1842. trigger_gp(selector_raw & !3);
  1843. return false;
  1844. }
  1845. if !descriptor.is_present() {
  1846. dbg_log!("#SS for loading non-present in SS sel={:x}", selector_raw);
  1847. trigger_ss(selector_raw & !3);
  1848. return false;
  1849. }
  1850. *stack_size_32 = descriptor.is_32();
  1851. }
  1852. else if reg == CS {
  1853. // handled by switch_cs_real_mode, far_return or far_jump
  1854. dbg_assert!(false);
  1855. }
  1856. else {
  1857. if descriptor.is_system()
  1858. || !descriptor.is_readable()
  1859. || (!descriptor.is_conforming_executable()
  1860. && (selector.rpl() > descriptor.dpl() || *cpl > descriptor.dpl()))
  1861. {
  1862. dbg_log!(
  1863. "#GP for loading invalid in seg {} sel={:x}",
  1864. reg,
  1865. selector_raw,
  1866. );
  1867. trigger_gp(selector_raw & !3);
  1868. return false;
  1869. }
  1870. if !descriptor.is_present() {
  1871. dbg_log!(
  1872. "#NP for loading not-present in seg {} sel={:x}",
  1873. reg,
  1874. selector_raw,
  1875. );
  1876. trigger_np(selector_raw & !3);
  1877. return false;
  1878. }
  1879. }
  1880. *segment_is_null.offset(reg as isize) = false;
  1881. *segment_limits.offset(reg as isize) = descriptor.effective_limit();
  1882. *segment_offsets.offset(reg as isize) = descriptor.base();
  1883. *sreg.offset(reg as isize) = selector_raw as u16;
  1884. true
  1885. }
  1886. #[no_mangle]
  1887. pub unsafe fn log_segment_null(segment: i32) {
  1888. dbg_assert!(segment >= 0 && segment < 8);
  1889. if *segment_is_null.offset(segment as isize) {
  1890. dbg_assert!(segment != CS && segment != SS);
  1891. dbg_log!("#gp: Access null segment in jit");
  1892. }
  1893. }
  1894. pub unsafe fn get_seg(segment: i32) -> OrPageFault<i32> {
  1895. dbg_assert!(segment >= 0 && segment < 8);
  1896. if *segment_is_null.offset(segment as isize) {
  1897. dbg_assert!(segment != CS && segment != SS);
  1898. dbg_log!("#gp: Access null segment");
  1899. dbg_trace();
  1900. dbg_assert!(!in_jit, "TODO");
  1901. trigger_gp(0);
  1902. return Err(());
  1903. }
  1904. return Ok(*segment_offsets.offset(segment as isize));
  1905. }
  1906. pub unsafe fn set_cr0(cr0: i32) {
  1907. let old_cr0 = *cr;
  1908. if old_cr0 & CR0_AM == 0 && cr0 & CR0_AM != 0 {
  1909. dbg_log!("Warning: Unimplemented: cr0 alignment mask");
  1910. }
  1911. if (cr0 & (CR0_PE | CR0_PG)) == CR0_PG {
  1912. panic!("cannot load PG without PE");
  1913. }
  1914. *cr = cr0;
  1915. *cr |= CR0_ET;
  1916. if old_cr0 & (CR0_PG | CR0_WP) != cr0 & (CR0_PG | CR0_WP) {
  1917. full_clear_tlb();
  1918. }
  1919. *protected_mode = (*cr & CR0_PE) == CR0_PE;
  1920. }
  1921. pub unsafe fn cpl_changed() { *last_virt_eip = -1; }
  1922. pub unsafe fn update_cs_size(new_size: bool) {
  1923. if *is_32 != new_size {
  1924. *is_32 = new_size;
  1925. }
  1926. }
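// Checks the I/O permission bitmap in the TSS: the 16-bit value at TSS offset 0x66 is the bitmap
// base, and the access is allowed only if all bits for ports port..port+size-1 are clear. Returns
// false (and raises #GP) if the access is not permitted at the current privilege level.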
  1927. pub unsafe fn test_privileges_for_io(port: i32, size: i32) -> bool {
  1928. if *protected_mode && (*cpl > getiopl() as u8 || (*flags & FLAG_VM != 0)) {
  1929. if !*tss_size_32 {
  1930. dbg_log!("#GP for port io, 16-bit TSS port={:x} size={}", port, size);
  1931. trigger_gp(0);
  1932. return false;
  1933. }
  1934. let tsr_size = *segment_limits.offset(TR as isize);
  1935. let tsr_offset = *segment_offsets.offset(TR as isize);
  1936. if tsr_size >= 0x67 {
  1937. dbg_assert!(tsr_offset + 0x64 + 2 & 0xFFF < 0xFFF);
  1938. let iomap_base = read16(return_on_pagefault!(
  1939. translate_address_system_read(tsr_offset + 0x64 + 2),
  1940. false
  1941. ));
  1942. let high_port = port + size - 1;
  1943. if tsr_size >= (iomap_base + (high_port >> 3)) as u32 {
  1944. let mask = ((1 << size) - 1) << (port & 7);
  1945. let addr = return_on_pagefault!(
  1946. translate_address_system_read(tsr_offset + iomap_base + (port >> 3)),
  1947. false
  1948. );
  1949. let port_info = if mask & 0xFF00 != 0 { read16(addr) } else { read8(addr) };
  1950. dbg_assert!(addr & 0xFFF < 0xFFF);
  1951. if port_info & mask == 0 {
  1952. return true;
  1953. }
  1954. }
  1955. }
  1956. dbg_log!("#GP for port io port={:x} size={}", port, size);
  1957. trigger_gp(0);
  1958. return false;
  1959. }
  1960. return true;
  1961. }
  1962. pub unsafe fn popa16() {
  1963. return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 16));
  1964. write_reg16(DI, pop16().unwrap());
  1965. write_reg16(SI, pop16().unwrap());
  1966. write_reg16(BP, pop16().unwrap());
  1967. adjust_stack_reg(2);
  1968. write_reg16(BX, pop16().unwrap());
  1969. write_reg16(DX, pop16().unwrap());
  1970. write_reg16(CX, pop16().unwrap());
  1971. write_reg16(AX, pop16().unwrap());
  1972. }
  1973. pub unsafe fn popa32() {
  1974. return_on_pagefault!(readable_or_pagefault(get_stack_pointer(0), 32));
  1975. write_reg32(EDI, pop32s().unwrap());
  1976. write_reg32(ESI, pop32s().unwrap());
  1977. write_reg32(EBP, pop32s().unwrap());
  1978. adjust_stack_reg(4);
  1979. write_reg32(EBX, pop32s().unwrap());
  1980. write_reg32(EDX, pop32s().unwrap());
  1981. write_reg32(ECX, pop32s().unwrap());
  1982. write_reg32(EAX, pop32s().unwrap());
  1983. }
  1984. #[no_mangle]
  1985. pub fn get_seg_cs() -> i32 { unsafe { *segment_offsets.offset(CS as isize) } }
  1986. #[no_mangle]
  1987. pub unsafe fn get_seg_ss() -> i32 { return *segment_offsets.offset(SS as isize); }
  1988. pub unsafe fn get_seg_prefix(default_segment: i32) -> OrPageFault<i32> {
  1989. dbg_assert!(!in_jit);
  1990. let prefix = *prefixes as i32 & PREFIX_MASK_SEGMENT;
  1991. if 0 != prefix {
  1992. if prefix == SEG_PREFIX_ZERO {
  1993. return Ok(0);
  1994. }
  1995. else {
  1996. return get_seg(prefix - 1);
  1997. }
  1998. }
  1999. else {
  2000. return get_seg(default_segment);
  2001. };
  2002. }
  2003. pub unsafe fn get_seg_prefix_ds(offset: i32) -> OrPageFault<i32> {
  2004. Ok(get_seg_prefix(DS)? + offset)
  2005. }
  2006. pub unsafe fn get_seg_prefix_ss(offset: i32) -> OrPageFault<i32> {
  2007. Ok(get_seg_prefix(SS)? + offset)
  2008. }
  2009. pub unsafe fn modrm_resolve(modrm_byte: i32) -> OrPageFault<i32> {
  2010. if is_asize_32() { resolve_modrm32(modrm_byte) } else { resolve_modrm16(modrm_byte) }
  2011. }
  2012. pub unsafe fn run_instruction(opcode: i32) { ::gen::interpreter::run(opcode as u32) }
  2013. pub unsafe fn run_instruction0f_16(opcode: i32) { ::gen::interpreter0f::run(opcode as u32) }
  2014. pub unsafe fn run_instruction0f_32(opcode: i32) { ::gen::interpreter0f::run(opcode as u32 | 0x100) }
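// One iteration of the emulation loop: look up compiled code for the current physical eip and run
// it; otherwise run the interpreter and bump the hotness counter so the block may get compiled.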
  2015. #[no_mangle]
  2016. pub unsafe fn cycle_internal() {
  2017. profiler::stat_increment(CYCLE_INTERNAL);
  2018. if !::config::FORCE_DISABLE_JIT {
  2019. *previous_ip = *instruction_pointer;
  2020. let phys_addr = return_on_pagefault!(get_phys_eip()) as u32;
  2021. let state_flags = pack_current_state_flags();
  2022. let entry = jit::jit_find_cache_entry(phys_addr, state_flags);
  2023. if entry != jit::CachedCode::NONE {
  2024. profiler::stat_increment(RUN_FROM_CACHE);
  2025. let initial_tsc = *timestamp_counter;
  2026. let wasm_table_index = entry.wasm_table_index;
  2027. let initial_state = entry.initial_state;
  2028. #[cfg(debug_assertions)]
  2029. {
  2030. in_jit = true;
  2031. }
  2032. call_indirect1(
  2033. (wasm_table_index as u32).wrapping_add(WASM_TABLE_OFFSET as u32) as i32,
  2034. initial_state,
  2035. );
  2036. #[cfg(debug_assertions)]
  2037. {
  2038. in_jit = false;
  2039. }
  2040. profiler::stat_increment_by(
  2041. RUN_FROM_CACHE_STEPS,
  2042. (*timestamp_counter - initial_tsc) as u64,
  2043. );
  2044. dbg_assert!(*timestamp_counter != initial_tsc, "TSC didn't change");
  2045. if cfg!(feature = "profiler") {
  2046. dbg_assert!(match ::cpu::cpu::debug_last_jump {
  2047. LastJump::Compiled { .. } => true,
  2048. _ => false,
  2049. });
  2050. let last_jump_addr = ::cpu::cpu::debug_last_jump.phys_address().unwrap();
  2051. let last_jump_opcode = if last_jump_addr != 0 {
  2052. read32s(last_jump_addr)
  2053. }
  2054. else {
  2055. // Happens during exit due to loop iteration limit
  2056. 0
  2057. };
  2058. ::opstats::record_opstat_jit_exit(last_jump_opcode as u32);
  2059. }
  2060. if Page::page_of(*previous_ip as u32) == Page::page_of(*instruction_pointer as u32) {
  2061. profiler::stat_increment(RUN_FROM_CACHE_EXIT_SAME_PAGE);
  2062. }
  2063. else {
  2064. profiler::stat_increment(RUN_FROM_CACHE_EXIT_DIFFERENT_PAGE);
  2065. }
  2066. }
  2067. else {
  2068. jit::record_entry_point(phys_addr);
  2069. #[cfg(feature = "profiler")]
  2070. {
  2071. if CHECK_MISSED_ENTRY_POINTS {
  2072. jit::check_missed_entry_points(phys_addr, state_flags);
  2073. }
  2074. }
  2075. if DEBUG {
  2076. dbg_assert!(!must_not_fault);
  2077. must_not_fault = true
  2078. }
  2079. if DEBUG {
  2080. dbg_assert!(must_not_fault);
  2081. must_not_fault = false
  2082. }
  2083. let initial_tsc = *timestamp_counter;
  2084. jit_run_interpreted(phys_addr as i32);
  2085. jit::jit_increase_hotness_and_maybe_compile(
  2086. phys_addr,
  2087. get_seg_cs() as u32,
  2088. state_flags,
  2089. *timestamp_counter - initial_tsc,
  2090. );
  2091. profiler::stat_increment_by(
  2092. RUN_INTERPRETED_STEPS,
  2093. (*timestamp_counter - initial_tsc) as u64,
  2094. );
  2095. dbg_assert!(*timestamp_counter != initial_tsc, "TSC didn't change");
  2096. };
  2097. }
  2098. else {
  2099. *previous_ip = *instruction_pointer;
  2100. let opcode = return_on_pagefault!(read_imm8());
  2101. *timestamp_counter += 1;
  2102. dbg_assert!(*prefixes == 0);
  2103. run_instruction(opcode | (*is_32 as i32) << 8);
  2104. dbg_assert!(*prefixes == 0);
  2105. }
  2106. }
  2107. pub unsafe fn get_phys_eip() -> OrPageFault<u32> {
  2108. let eip = *instruction_pointer;
  2109. if 0 != eip & !0xFFF ^ *last_virt_eip {
  2110. *eip_phys = (translate_address_read(eip)? ^ eip as u32) as i32;
  2111. *last_virt_eip = eip & !0xFFF
  2112. }
  2113. let phys_addr = (*eip_phys ^ eip) as u32;
  2114. dbg_assert!(!in_mapped_range(phys_addr));
  2115. return Ok(phys_addr);
  2116. }
  2117. unsafe fn jit_run_interpreted(phys_addr: i32) {
  2118. profiler::stat_increment(RUN_INTERPRETED);
  2119. dbg_assert!(!in_mapped_range(phys_addr as u32));
  2120. if cfg!(debug_assertions) {
  2121. debug_last_jump = LastJump::Interpreted {
  2122. phys_addr: phys_addr as u32,
  2123. };
  2124. }
  2125. jit_block_boundary = false;
  2126. let opcode = *mem8.offset(phys_addr as isize) as i32;
  2127. *instruction_pointer += 1;
  2128. *timestamp_counter += 1;
  2129. dbg_assert!(*prefixes == 0);
  2130. run_instruction(opcode | (*is_32 as i32) << 8);
  2131. dbg_assert!(*prefixes == 0);
2132. // We need to limit the number of iterations here, as jumps within the same page are not counted
2133. // as block boundaries for the interpreter (they don't create an entry point and don't require
2134. // a check whether the jump target may have compiled code)
  2135. let mut i = 0;
  2136. while !jit_block_boundary
  2137. && Page::page_of(*previous_ip as u32) == Page::page_of(*instruction_pointer as u32)
  2138. && i < INTERPRETER_ITERATION_LIMIT
  2139. {
  2140. *previous_ip = *instruction_pointer;
  2141. let opcode = return_on_pagefault!(read_imm8());
  2142. if CHECK_MISSED_ENTRY_POINTS {
  2143. let phys_addr = return_on_pagefault!(get_phys_eip()) as u32;
  2144. let state_flags = pack_current_state_flags();
  2145. let entry = jit::jit_find_cache_entry(phys_addr, state_flags);
  2146. if entry != jit::CachedCode::NONE {
  2147. profiler::stat_increment(RUN_INTERPRETED_MISSED_COMPILED_ENTRY_RUN_INTERPRETED);
  2148. //dbg_log!(
  2149. // "missed entry point at {:x} prev_opcode={:x} opcode={:x}",
  2150. // phys_addr,
  2151. // prev_opcode,
  2152. // opcode
  2153. //);
  2154. }
  2155. }
  2156. if cfg!(debug_assertions) {
  2157. debug_last_jump = LastJump::Interpreted {
  2158. phys_addr: phys_addr as u32,
  2159. };
  2160. }
  2161. *timestamp_counter += 1;
  2162. //if DEBUG {
  2163. // logop(*previous_ip, opcode_0);
  2164. //}
  2165. dbg_assert!(*prefixes == 0);
  2166. run_instruction(opcode | (*is_32 as i32) << 8);
  2167. dbg_assert!(*prefixes == 0);
  2168. i += 1;
  2169. }
  2170. }
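// State flags used as part of the jit cache key; layout as packed below:
// bit 0: cs is 32-bit, bit 1: ss is 32-bit, bit 2: cpl == 3, bit 3: flat segmentation.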
  2171. pub fn pack_current_state_flags() -> CachedStateFlags {
  2172. unsafe {
  2173. CachedStateFlags::of_u32(
  2174. (*is_32 as u32) << 0
  2175. | (*stack_size_32 as u32) << 1
  2176. | ((*cpl == 3) as u32) << 2
  2177. | (has_flat_segmentation() as u32) << 3,
  2178. )
  2179. }
  2180. }
  2181. #[no_mangle]
  2182. pub unsafe fn has_flat_segmentation() -> bool {
2183. // ss can't be null (loading a null selector into SS faults), so only its base needs to be checked here
  2184. return *segment_offsets.offset(SS as isize) == 0
  2185. && !*segment_is_null.offset(DS as isize)
  2186. && *segment_offsets.offset(DS as isize) == 0;
  2187. }
  2188. pub unsafe fn run_prefix_instruction() {
  2189. run_instruction(return_on_pagefault!(read_imm8()) | (is_osize_32() as i32) << 8);
  2190. }
  2191. pub unsafe fn segment_prefix_op(seg: i32) {
  2192. dbg_assert!(seg <= 5);
  2193. *prefixes = (*prefixes as i32 | seg + 1) as u8;
  2194. run_prefix_instruction();
  2195. *prefixes = 0
  2196. }
  2197. #[no_mangle]
  2198. pub unsafe fn do_many_cycles_native() {
  2199. profiler::stat_increment(DO_MANY_CYCLES);
  2200. let initial_timestamp_counter = *timestamp_counter;
  2201. while (*timestamp_counter).wrapping_sub(initial_timestamp_counter) < LOOP_COUNTER as u32
  2202. && !*in_hlt
  2203. {
  2204. cycle_internal();
  2205. }
  2206. }
  2207. #[no_mangle]
  2208. pub unsafe fn trigger_de() {
  2209. dbg_log!("#de");
  2210. *instruction_pointer = *previous_ip;
  2211. if DEBUG {
  2212. if cpu_exception_hook(CPU_EXCEPTION_DE) {
  2213. return;
  2214. }
  2215. }
  2216. call_interrupt_vector(CPU_EXCEPTION_DE, false, None);
  2217. }
  2218. #[no_mangle]
  2219. pub unsafe fn trigger_ud() {
  2220. dbg_log!("#ud");
  2221. dbg_trace();
  2222. *instruction_pointer = *previous_ip;
  2223. if DEBUG {
  2224. if cpu_exception_hook(CPU_EXCEPTION_UD) {
  2225. return;
  2226. }
  2227. }
  2228. call_interrupt_vector(CPU_EXCEPTION_UD, false, None);
  2229. }
  2230. pub unsafe fn trigger_nm() {
  2231. dbg_log!("#nm eip={:x}", *previous_ip);
  2232. dbg_trace();
  2233. *instruction_pointer = *previous_ip;
  2234. if DEBUG {
  2235. if cpu_exception_hook(CPU_EXCEPTION_NM) {
  2236. return;
  2237. }
  2238. }
  2239. call_interrupt_vector(CPU_EXCEPTION_NM, false, None);
  2240. }
  2241. #[no_mangle]
  2242. pub unsafe fn trigger_gp(code: i32) {
  2243. dbg_log!("#gp");
  2244. *instruction_pointer = *previous_ip;
  2245. if DEBUG {
  2246. if cpu_exception_hook(CPU_EXCEPTION_GP) {
  2247. return;
  2248. }
  2249. }
  2250. call_interrupt_vector(CPU_EXCEPTION_GP, false, Some(code));
  2251. }
  2252. pub unsafe fn virt_boundary_read16(low: u32, high: u32) -> i32 {
  2253. dbg_assert!(low & 0xFFF == 0xFFF);
  2254. dbg_assert!(high & 0xFFF == 0);
  2255. return read8(low as u32) | read8(high as u32) << 8;
  2256. }
  2257. pub unsafe fn virt_boundary_read32s(low: u32, high: u32) -> i32 {
  2258. dbg_assert!(low & 0xFFF >= 0xFFD);
  2259. dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
  2260. let mid;
  2261. if 0 != low & 1 {
  2262. if 0 != low & 2 {
  2263. // 0xFFF
  2264. mid = read16(high - 2)
  2265. }
  2266. else {
  2267. // 0xFFD
  2268. mid = read16(low + 1)
  2269. }
  2270. }
  2271. else {
  2272. // 0xFFE
  2273. mid = virt_boundary_read16(low + 1, high - 1)
  2274. }
  2275. return read8(low as u32) | mid << 8 | read8(high as u32) << 24;
  2276. }
  2277. pub unsafe fn virt_boundary_write16(low: u32, high: u32, value: i32) {
  2278. dbg_assert!(low & 0xFFF == 0xFFF);
  2279. dbg_assert!(high & 0xFFF == 0);
  2280. write8(low as u32, value);
  2281. write8(high as u32, value >> 8);
  2282. }
  2283. pub unsafe fn virt_boundary_write32(low: u32, high: u32, value: i32) {
  2284. dbg_assert!(low & 0xFFF >= 0xFFD);
  2285. dbg_assert!(high - 3 & 0xFFF == low & 0xFFF);
  2286. write8(low as u32, value);
  2287. if 0 != low & 1 {
  2288. if 0 != low & 2 {
  2289. // 0xFFF
  2290. write8((high - 2) as u32, value >> 8);
  2291. write8((high - 1) as u32, value >> 16);
  2292. }
  2293. else {
  2294. // 0xFFD
  2295. write8((low + 1) as u32, value >> 8);
  2296. write8((low + 2) as u32, value >> 16);
  2297. }
  2298. }
  2299. else {
  2300. // 0xFFE
  2301. write8((low + 1) as u32, value >> 8);
  2302. write8((high - 1) as u32, value >> 16);
  2303. }
  2304. write8(high as u32, value >> 24);
  2305. }
  2306. pub unsafe fn safe_read8(addr: i32) -> OrPageFault<i32> { Ok(read8(translate_address_read(addr)?)) }
  2307. pub unsafe fn safe_read16(addr: i32) -> OrPageFault<i32> {
  2308. if addr & 0xFFF == 0xFFF {
  2309. Ok(safe_read8(addr)? | safe_read8(addr + 1)? << 8)
  2310. }
  2311. else {
  2312. Ok(read16(translate_address_read(addr)?))
  2313. }
  2314. }
  2315. pub unsafe fn safe_read32s(addr: i32) -> OrPageFault<i32> {
  2316. if addr & 0xFFF >= 0xFFD {
  2317. Ok(safe_read16(addr)? | safe_read16(addr + 2)? << 16)
  2318. }
  2319. else {
  2320. Ok(read32s(translate_address_read(addr)?))
  2321. }
  2322. }
  2323. pub unsafe fn safe_read_f32(addr: i32) -> OrPageFault<f32> {
  2324. Ok(std::mem::transmute(safe_read32s(addr)?))
  2325. }
  2326. pub unsafe fn safe_read64s(addr: i32) -> OrPageFault<u64> {
  2327. if addr & 0xFFF > 0x1000 - 8 {
  2328. Ok(safe_read32s(addr)? as u32 as u64 | (safe_read32s(addr + 4)? as u32 as u64) << 32)
  2329. }
  2330. else {
  2331. Ok(read64s(translate_address_read(addr)?) as u64)
  2332. }
  2333. }
  2334. pub unsafe fn safe_read128s(addr: i32) -> OrPageFault<reg128> {
  2335. if addr & 0xFFF > 0x1000 - 16 {
  2336. Ok(reg128 {
  2337. u64_0: [safe_read64s(addr)?, safe_read64s(addr + 8)?],
  2338. })
  2339. }
  2340. else {
  2341. Ok(read128(translate_address_read(addr)?))
  2342. }
  2343. }
  2344. #[no_mangle]
  2345. #[cfg(feature = "profiler")]
  2346. pub fn report_safe_read_jit_slow(address: u32, entry: i32) {
  2347. if entry & TLB_VALID == 0 {
  2348. profiler::stat_increment(SAFE_READ_SLOW_NOT_VALID);
  2349. }
  2350. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2351. profiler::stat_increment(SAFE_READ_SLOW_IN_MAPPED_RANGE);
  2352. }
  2353. else if entry & TLB_NO_USER != 0 {
  2354. profiler::stat_increment(SAFE_READ_SLOW_NOT_USER);
  2355. }
  2356. else if address & 0xFFF > 0x1000 - 16 {
  2357. profiler::stat_increment(SAFE_READ_SLOW_PAGE_CROSSED);
  2358. }
  2359. else {
  2360. dbg_log!("Unexpected entry bit: {:x} (read at {:x})", entry, address);
  2361. dbg_assert!(false);
  2362. }
  2363. }
  2364. #[no_mangle]
  2365. #[cfg(feature = "profiler")]
  2366. pub fn report_safe_write_jit_slow(address: u32, entry: i32) {
  2367. if entry & TLB_VALID == 0 {
  2368. profiler::stat_increment(SAFE_WRITE_SLOW_NOT_VALID);
  2369. }
  2370. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2371. profiler::stat_increment(SAFE_WRITE_SLOW_IN_MAPPED_RANGE);
  2372. }
  2373. else if entry & TLB_HAS_CODE != 0 {
  2374. profiler::stat_increment(SAFE_WRITE_SLOW_HAS_CODE);
  2375. }
  2376. else if entry & TLB_READONLY != 0 {
  2377. profiler::stat_increment(SAFE_WRITE_SLOW_READ_ONLY);
  2378. }
  2379. else if entry & TLB_NO_USER != 0 {
  2380. profiler::stat_increment(SAFE_WRITE_SLOW_NOT_USER);
  2381. }
  2382. else if address & 0xFFF > 0x1000 - 16 {
  2383. profiler::stat_increment(SAFE_WRITE_SLOW_PAGE_CROSSED);
  2384. }
  2385. else {
  2386. dbg_assert!(false);
  2387. }
  2388. }
  2389. #[no_mangle]
  2390. #[cfg(feature = "profiler")]
  2391. pub fn report_safe_read_write_jit_slow(address: u32, entry: i32) {
  2392. if entry & TLB_VALID == 0 {
  2393. profiler::stat_increment(SAFE_READ_WRITE_SLOW_NOT_VALID);
  2394. }
  2395. else if entry & TLB_IN_MAPPED_RANGE != 0 {
  2396. profiler::stat_increment(SAFE_READ_WRITE_SLOW_IN_MAPPED_RANGE);
  2397. }
  2398. else if entry & TLB_HAS_CODE != 0 {
  2399. profiler::stat_increment(SAFE_READ_WRITE_SLOW_HAS_CODE);
  2400. }
  2401. else if entry & TLB_READONLY != 0 {
  2402. profiler::stat_increment(SAFE_READ_WRITE_SLOW_READ_ONLY);
  2403. }
  2404. else if entry & TLB_NO_USER != 0 {
  2405. profiler::stat_increment(SAFE_READ_WRITE_SLOW_NOT_USER);
  2406. }
  2407. else if address & 0xFFF > 0x1000 - 16 {
  2408. profiler::stat_increment(SAFE_READ_WRITE_SLOW_PAGE_CROSSED);
  2409. }
  2410. else {
  2411. dbg_assert!(false);
  2412. }
  2413. }
  2414. #[repr(align(0x1000))]
  2415. struct ScratchBuffer([u8; 0x1000 * 2]);
  2416. static mut jit_paging_scratch_buffer: ScratchBuffer = ScratchBuffer([0; 2 * 0x1000]);
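// Slow path for jit memory reads (and for read-modify-write accesses when is_write is set).
// Returns 1 after signalling a page fault; otherwise returns ((p - mem8) ^ addr) & !0xFFF, where p
// is either the translated physical address or the page-aligned scratch buffer above, which is
// filled here for page-crossing and mmio reads.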
  2417. pub unsafe fn safe_read_slow_jit(addr: i32, bitsize: i32, start_eip: i32, is_write: bool) -> i32 {
  2418. let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
  2419. let addr_low = match if is_write {
  2420. translate_address_write_jit(addr)
  2421. }
  2422. else {
  2423. translate_address_read_jit(addr)
  2424. } {
  2425. Err(()) => {
  2426. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2427. return 1;
  2428. },
  2429. Ok(addr) => addr,
  2430. };
  2431. if crosses_page {
  2432. let boundary_addr = (addr | 0xFFF) + 1;
  2433. let addr_high = match if is_write {
  2434. translate_address_write_jit(boundary_addr)
  2435. }
  2436. else {
  2437. translate_address_read_jit(boundary_addr)
  2438. } {
  2439. Err(()) => {
  2440. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2441. return 1;
  2442. },
  2443. Ok(addr) => addr,
  2444. };
2445. // TODO: Could check whether the virtual pages map to consecutive physical pages and take the fast path
  2446. // do read, write into scratch buffer
  2447. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2448. dbg_assert!(scratch & 0xFFF == 0);
  2449. for s in addr_low..((addr_low | 0xFFF) + 1) {
  2450. *(scratch as *mut u8).offset((s & 0xFFF) as isize) = read8(s) as u8
  2451. }
  2452. for s in addr_high..(addr_high + (addr + bitsize / 8 & 0xFFF) as u32) {
  2453. *(scratch as *mut u8).offset((0x1000 | s & 0xFFF) as isize) = read8(s) as u8
  2454. }
  2455. (((scratch - mem8 as u32) as i32) ^ addr) & !0xFFF
  2456. }
  2457. else if in_mapped_range(addr_low) {
  2458. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2459. dbg_assert!(scratch & 0xFFF == 0);
  2460. for s in addr_low..(addr_low + bitsize as u32 / 8) {
  2461. *(scratch as *mut u8).offset((s & 0xFFF) as isize) = read8(s) as u8
  2462. }
  2463. (((scratch - mem8 as u32) as i32) ^ addr) & !0xFFF
  2464. }
  2465. else {
  2466. (addr_low as i32 ^ addr) & !0xFFF
  2467. }
  2468. }
  2469. #[no_mangle]
  2470. pub unsafe fn safe_read8_slow_jit(addr: i32, eip: i32) -> i32 {
  2471. safe_read_slow_jit(addr, 8, eip, false)
  2472. }
  2473. #[no_mangle]
  2474. pub unsafe fn safe_read16_slow_jit(addr: i32, eip: i32) -> i32 {
  2475. safe_read_slow_jit(addr, 16, eip, false)
  2476. }
  2477. #[no_mangle]
  2478. pub unsafe fn safe_read32s_slow_jit(addr: i32, eip: i32) -> i32 {
  2479. safe_read_slow_jit(addr, 32, eip, false)
  2480. }
  2481. #[no_mangle]
  2482. pub unsafe fn safe_read64s_slow_jit(addr: i32, eip: i32) -> i32 {
  2483. safe_read_slow_jit(addr, 64, eip, false)
  2484. }
  2485. #[no_mangle]
  2486. pub unsafe fn safe_read128s_slow_jit(addr: i32, eip: i32) -> i32 {
  2487. safe_read_slow_jit(addr, 128, eip, false)
  2488. }
  2489. #[no_mangle]
  2490. pub unsafe fn safe_read_write8_slow_jit(addr: i32, eip: i32) -> i32 {
  2491. safe_read_slow_jit(addr, 8, eip, true)
  2492. }
  2493. #[no_mangle]
  2494. pub unsafe fn safe_read_write16_slow_jit(addr: i32, eip: i32) -> i32 {
  2495. safe_read_slow_jit(addr, 16, eip, true)
  2496. }
  2497. #[no_mangle]
  2498. pub unsafe fn safe_read_write32s_slow_jit(addr: i32, eip: i32) -> i32 {
  2499. safe_read_slow_jit(addr, 32, eip, true)
  2500. }
  2501. #[no_mangle]
  2502. pub unsafe fn safe_read_write64_slow_jit(addr: i32, eip: i32) -> i32 {
  2503. safe_read_slow_jit(addr, 64, eip, true)
  2504. }
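// Slow path for jit memory writes. Page-crossing and mmio writes are performed here and a
// scratch-buffer offset is returned so the store emitted by the jit is harmless; plain writes
// return the translated address and mark the target page dirty for the jit.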
  2505. pub unsafe fn safe_write_slow_jit(
  2506. addr: i32,
  2507. bitsize: i32,
  2508. value_low: u64,
  2509. value_high: u64,
  2510. start_eip: i32,
  2511. ) -> i32 {
  2512. if Page::page_of(*instruction_pointer as u32) == Page::page_of(addr as u32) {
  2513. // XXX: Check based on virtual address
  2514. dbg_log!(
  2515. "SMC: bits={} eip={:x} writeaddr={:x}",
  2516. bitsize,
  2517. start_eip as u32,
  2518. addr as u32
  2519. );
  2520. }
  2521. let crosses_page = (addr & 0xFFF) + bitsize / 8 > 0x1000;
  2522. let addr_low = match translate_address_write_jit(addr) {
  2523. Err(()) => {
  2524. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2525. return 1;
  2526. },
  2527. Ok(addr) => addr,
  2528. };
  2529. if crosses_page {
  2530. let addr_high = match translate_address_write_jit((addr | 0xFFF) + 1) {
  2531. Err(()) => {
  2532. *previous_ip = *instruction_pointer & !0xFFF | start_eip & 0xFFF;
  2533. *page_fault = true;
  2534. return 1;
  2535. },
  2536. Ok(addr) => addr,
  2537. };
2538. // TODO: Could check whether the virtual pages map to consecutive physical pages and take the fast path
  2539. // do write, return dummy pointer for fast path to write into
  2540. match bitsize {
  2541. 128 => safe_write128(
  2542. addr,
  2543. reg128 {
  2544. u64_0: [value_low, value_high],
  2545. },
  2546. )
  2547. .unwrap(),
  2548. 64 => safe_write64(addr, value_low).unwrap(),
  2549. 32 => virt_boundary_write32(
  2550. addr_low,
  2551. addr_high | (addr as u32 + 3 & 3),
  2552. value_low as i32,
  2553. ),
  2554. 16 => virt_boundary_write16(addr_low, addr_high, value_low as i32),
  2555. 8 => dbg_assert!(false),
  2556. _ => dbg_assert!(false),
  2557. }
  2558. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2559. dbg_assert!(scratch & 0xFFF == 0);
  2560. ((scratch as i32 - mem8 as i32) ^ addr) & !0xFFF
  2561. }
  2562. else if in_mapped_range(addr_low) {
  2563. match bitsize {
  2564. 128 => memory::mmap_write128(
  2565. addr_low,
  2566. value_low as i32,
  2567. (value_low >> 32) as i32,
  2568. value_high as i32,
  2569. (value_high >> 32) as i32,
  2570. ),
  2571. 64 => memory::mmap_write64(addr_low, value_low as i32, (value_low >> 32) as i32),
  2572. 32 => memory::mmap_write32(addr_low, value_low as i32),
  2573. 16 => memory::mmap_write16(addr_low, value_low as i32),
  2574. 8 => memory::mmap_write8(addr_low, value_low as i32),
  2575. _ => dbg_assert!(false),
  2576. }
  2577. let scratch = jit_paging_scratch_buffer.0.as_mut_ptr() as u32;
  2578. dbg_assert!(scratch & 0xFFF == 0);
  2579. ((scratch as i32 - mem8 as i32) ^ addr) & !0xFFF
  2580. }
  2581. else {
  2582. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(addr_low));
  2583. (addr_low as i32 ^ addr) & !0xFFF
  2584. }
  2585. }
  2586. #[no_mangle]
  2587. pub unsafe fn safe_write8_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2588. safe_write_slow_jit(addr, 8, value as u64, 0, start_eip)
  2589. }
  2590. #[no_mangle]
  2591. pub unsafe fn safe_write16_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2592. safe_write_slow_jit(addr, 16, value as u64, 0, start_eip)
  2593. }
  2594. #[no_mangle]
  2595. pub unsafe fn safe_write32_slow_jit(addr: i32, value: u32, start_eip: i32) -> i32 {
  2596. safe_write_slow_jit(addr, 32, value as u64, 0, start_eip)
  2597. }
  2598. #[no_mangle]
  2599. pub unsafe fn safe_write64_slow_jit(addr: i32, value: u64, start_eip: i32) -> i32 {
  2600. safe_write_slow_jit(addr, 64, value, 0, start_eip)
  2601. }
  2602. #[no_mangle]
  2603. pub unsafe fn safe_write128_slow_jit(addr: i32, low: u64, high: u64, start_eip: i32) -> i32 {
  2604. safe_write_slow_jit(addr, 128, low, high, start_eip)
  2605. }
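// safe_write{8,16,32,64,128}: translate the address, then either forward to the mmio handlers, or
// mark the page dirty for the jit (unless the TLB says it holds no compiled code) and write to
// guest memory directly. Writes that cross a page boundary are split into two parts.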
  2606. pub unsafe fn safe_write8(addr: i32, value: i32) -> OrPageFault<()> {
  2607. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2608. if in_mapped_range(phys_addr) {
  2609. memory::mmap_write8(phys_addr, value);
  2610. }
  2611. else {
  2612. if !can_skip_dirty_page {
  2613. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(phys_addr));
  2614. }
  2615. else {
  2616. dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2617. }
  2618. memory::write8_no_mmap_or_dirty_check(phys_addr, value);
  2619. };
  2620. Ok(())
  2621. }
  2622. pub unsafe fn safe_write16(addr: i32, value: i32) -> OrPageFault<()> {
  2623. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2624. if addr & 0xFFF == 0xFFF {
  2625. virt_boundary_write16(phys_addr, translate_address_write(addr + 1)?, value);
  2626. }
  2627. else if in_mapped_range(phys_addr) {
  2628. memory::mmap_write16(phys_addr, value);
  2629. }
  2630. else {
  2631. if !can_skip_dirty_page {
  2632. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(phys_addr));
  2633. }
  2634. else {
  2635. dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2636. }
  2637. memory::write16_no_mmap_or_dirty_check(phys_addr, value);
  2638. };
  2639. Ok(())
  2640. }
  2641. pub unsafe fn safe_write32(addr: i32, value: i32) -> OrPageFault<()> {
  2642. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2643. if addr & 0xFFF > 0x1000 - 4 {
  2644. virt_boundary_write32(
  2645. phys_addr,
  2646. translate_address_write(addr + 3 & !3)? | (addr as u32 + 3 & 3),
  2647. value,
  2648. );
  2649. }
  2650. else if in_mapped_range(phys_addr) {
  2651. memory::mmap_write32(phys_addr, value);
  2652. }
  2653. else {
  2654. if !can_skip_dirty_page {
  2655. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(phys_addr));
  2656. }
  2657. else {
  2658. dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2659. }
  2660. memory::write32_no_mmap_or_dirty_check(phys_addr, value);
  2661. };
  2662. Ok(())
  2663. }
  2664. pub unsafe fn safe_write64(addr: i32, value: u64) -> OrPageFault<()> {
  2665. if addr & 0xFFF > 0x1000 - 8 {
  2666. writable_or_pagefault(addr, 8)?;
  2667. safe_write32(addr, value as i32).unwrap();
  2668. safe_write32(addr + 4, (value >> 32) as i32).unwrap();
  2669. }
  2670. else {
  2671. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2672. if in_mapped_range(phys_addr) {
  2673. memory::mmap_write64(phys_addr, value as i32, (value >> 32) as i32);
  2674. }
  2675. else {
  2676. if !can_skip_dirty_page {
  2677. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(phys_addr));
  2678. }
  2679. else {
  2680. dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2681. }
  2682. memory::write64_no_mmap_or_dirty_check(phys_addr, value);
  2683. }
  2684. };
  2685. Ok(())
  2686. }
  2687. pub unsafe fn safe_write128(addr: i32, value: reg128) -> OrPageFault<()> {
  2688. if addr & 0xFFF > 0x1000 - 16 {
  2689. writable_or_pagefault(addr, 16)?;
  2690. safe_write64(addr, value.u64_0[0]).unwrap();
  2691. safe_write64(addr + 8, value.u64_0[1]).unwrap();
  2692. }
  2693. else {
  2694. let (phys_addr, can_skip_dirty_page) = translate_address_write_and_can_skip_dirty(addr)?;
  2695. if in_mapped_range(phys_addr) {
  2696. memory::mmap_write128(
  2697. phys_addr,
  2698. value.i32_0[0],
  2699. value.i32_0[1],
  2700. value.i32_0[2],
  2701. value.i32_0[3],
  2702. );
  2703. }
  2704. else {
  2705. if !can_skip_dirty_page {
  2706. jit::jit_dirty_page(jit::get_jit_state(), Page::page_of(phys_addr));
  2707. }
  2708. else {
  2709. dbg_assert!(!jit::jit_page_has_code(Page::page_of(phys_addr as u32)));
  2710. }
  2711. memory::write128_no_mmap_or_dirty_check(phys_addr, value);
  2712. }
  2713. };
  2714. Ok(())
  2715. }
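// Maps an 8-bit register encoding (AL, CL, DL, BL, AH, CH, DH, BH) onto a byte offset into the
// reg8 view of the general-purpose registers: indices 0-3 select the low byte of eax/ecx/edx/ebx,
// indices 4-7 the second-lowest byte.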
fn get_reg8_index(index: i32) -> i32 { return index << 2 & 12 | index >> 2 & 1; }
pub unsafe fn read_reg8(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg8.offset(get_reg8_index(index) as isize) as i32;
}
pub unsafe fn write_reg8(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg8.offset(get_reg8_index(index) as isize) = value as u8;
}
fn get_reg16_index(index: i32) -> i32 { return index << 1; }
pub unsafe fn read_reg16(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    return *reg16.offset(get_reg16_index(index) as isize) as i32;
}
pub unsafe fn write_reg16(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg16.offset(get_reg16_index(index) as isize) = value as u16;
}
pub unsafe fn read_reg32(index: i32) -> i32 {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize)
}
pub unsafe fn write_reg32(index: i32, value: i32) {
    dbg_assert!(index >= 0 && index < 8);
    *reg32.offset(index as isize) = value;
}
pub unsafe fn read_mmx32s(r: i32) -> i32 { (*fpu_st.offset(r as isize)).mantissa as i32 }
pub unsafe fn read_mmx64s(r: i32) -> u64 { (*fpu_st.offset(r as isize)).mantissa }
pub unsafe fn write_mmx_reg64(r: i32, data: u64) { (*fpu_st.offset(r as isize)).mantissa = data; }
pub unsafe fn read_xmm_f32(r: i32) -> f32 { return (*reg_xmm.offset(r as isize)).f32_0[0]; }
pub unsafe fn read_xmm32(r: i32) -> i32 { return (*reg_xmm.offset(r as isize)).u32_0[0] as i32; }
pub unsafe fn read_xmm64s(r: i32) -> u64 { (*reg_xmm.offset(r as isize)).u64_0[0] }
pub unsafe fn read_xmm128s(r: i32) -> reg128 { return *reg_xmm.offset(r as isize); }
pub unsafe fn write_xmm_f32(r: i32, data: f32) { (*reg_xmm.offset(r as isize)).f32_0[0] = data; }
pub unsafe fn write_xmm32(r: i32, data: i32) { (*reg_xmm.offset(r as isize)).i32_0[0] = data; }
pub unsafe fn write_xmm64(r: i32, data: u64) { (*reg_xmm.offset(r as isize)).u64_0[0] = data }
pub unsafe fn write_xmm_f64(r: i32, data: f64) { (*reg_xmm.offset(r as isize)).f64_0[0] = data }
pub unsafe fn write_xmm128(r: i32, i0: i32, i1: i32, i2: i32, i3: i32) {
    let x = reg128 {
        u32_0: [i0 as u32, i1 as u32, i2 as u32, i3 as u32],
    };
    *reg_xmm.offset(r as isize) = x;
}
pub unsafe fn write_xmm128_2(r: i32, i0: u64, i1: u64) {
    *reg_xmm.offset(r as isize) = reg128 { u64_0: [i0, i1] };
}
pub unsafe fn write_xmm_reg128(r: i32, data: reg128) { *reg_xmm.offset(r as isize) = data; }
/// Set the fpu tag word to valid and the top-of-stack to 0 on mmx instructions
pub fn transition_fpu_to_mmx() {
    unsafe {
        fpu_set_tag_word(0);
        *fpu_stack_ptr = 0;
    }
}
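/// Check CR0.EM and CR0.TS before an FPU instruction: if either bit is set the
/// instruction must raise #NM (device not available) and false is returned.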
pub unsafe fn task_switch_test() -> bool {
    if 0 != *cr & (CR0_EM | CR0_TS) {
        trigger_nm();
        return false;
    }
    else {
        return true;
    };
}
pub unsafe fn set_mxcsr(new_mxcsr: i32) {
    dbg_assert!(new_mxcsr & !MXCSR_MASK == 0); // checked by caller
    if *mxcsr & MXCSR_DAZ == 0 && new_mxcsr & MXCSR_DAZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Denormals Are Zero")
    }
    if *mxcsr & MXCSR_FZ == 0 && new_mxcsr & MXCSR_FZ != 0 {
        dbg_log!("Warning: Unimplemented MXCSR bit: Flush To Zero")
    }
    let rounding_mode = new_mxcsr >> MXCSR_RC_SHIFT & 3;
    if *mxcsr >> MXCSR_RC_SHIFT & 3 == 0 && rounding_mode != 0 {
        dbg_log!(
            "Warning: Unimplemented MXCSR rounding mode: {}",
            rounding_mode
        )
    }
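    // MXCSR bits 7..=12 mask the six SIMD floating-point exceptions
    // (invalid, denormal, divide-by-zero, overflow, underflow, precision);
    // anything other than "all masked" is not implemented here.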
    let exception_mask = new_mxcsr >> 7 & 0b111111;
    if exception_mask != 0b111111 {
        dbg_log!(
            "Warning: Unimplemented MXCSR exception mask: 0b{:b}",
            exception_mask
        )
    }
    *mxcsr = new_mxcsr;
}
#[no_mangle]
pub unsafe fn task_switch_test_jit() {
    let did_fault = !task_switch_test();
    dbg_assert!(did_fault);
}
pub unsafe fn task_switch_test_mmx() -> bool {
    if *cr.offset(4) & CR4_OSFXSR == 0 {
        dbg_log!("Warning: Unimplemented task switch test with cr4.osfxsr=0");
    }
    if 0 != *cr & CR0_EM {
        trigger_ud();
        return false;
    }
    else if 0 != *cr & CR0_TS {
        trigger_nm();
        return false;
    }
    else {
        return true;
    };
}
#[no_mangle]
pub unsafe fn task_switch_test_mmx_jit() {
    let did_fault = !task_switch_test_mmx();
    dbg_assert!(did_fault);
}
pub unsafe fn read_moffs() -> OrPageFault<i32> {
    // read 2 or 4 bytes from ip, depending on the address size attribute
    if is_asize_32() { read_imm32s() } else { read_imm16() }
}
#[no_mangle]
pub unsafe fn get_real_eip() -> i32 {
    // Returns the 'real' instruction pointer, without segment offset
    return *instruction_pointer - get_seg_cs();
}
pub unsafe fn get_stack_reg() -> i32 {
    if *stack_size_32 {
        return read_reg32(ESP);
    }
    else {
        return read_reg16(SP);
    };
}
#[no_mangle]
pub unsafe fn set_stack_reg(value: i32) {
    if *stack_size_32 {
        write_reg32(ESP, value)
    }
    else {
        write_reg16(SP, value)
    };
}
pub unsafe fn get_reg_asize(reg: i32) -> i32 {
    dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
    let r = read_reg32(reg);
    if is_asize_32() {
        return r;
    }
    else {
        return r & 0xFFFF;
    };
}
pub unsafe fn set_reg_asize(is_asize_32: bool, reg: i32, value: i32) {
    dbg_assert!(reg == ECX || reg == ESI || reg == EDI);
    if is_asize_32 {
        write_reg32(reg, value)
    }
    else {
        write_reg16(reg, value)
    };
}
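// Decrement the repeat counter (ECX, or CX with a 16-bit address size) and
// return the new value, as used when executing REP-prefixed string instructions.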
pub unsafe fn decr_ecx_asize(is_asize_32: bool) -> i32 {
    return if is_asize_32 {
        write_reg32(ECX, read_reg32(ECX) - 1);
        read_reg32(ECX)
    }
    else {
        write_reg16(CX, read_reg16(CX) - 1);
        read_reg16(CX)
    };
}
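// Writing the TSC is implemented by adjusting tsc_offset so that subsequent
// read_tsc() calls continue from the requested value.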
#[no_mangle]
pub unsafe fn set_tsc(low: u32, high: u32) {
    let new_value = low as u64 | (high as u64) << 32;
    let current_value = read_tsc();
    tsc_offset = current_value.wrapping_sub(new_value);
}
#[no_mangle]
pub unsafe fn read_tsc() -> u64 {
    let n = microtick() * TSC_RATE;
    let value = (n as u64).wrapping_sub(tsc_offset);
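    // The imprecision-compensation path in the else branch below is currently
    // disabled by the `if true`, but kept for reference.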
    if true {
        return value;
    }
    else {
        if value == rdtsc_last_value {
            // don't go past 1ms
            if (rdtsc_imprecision_offset as f64) < TSC_RATE {
                rdtsc_imprecision_offset = rdtsc_imprecision_offset.wrapping_add(1)
            }
        }
        else {
            let previous_value = rdtsc_last_value.wrapping_add(rdtsc_imprecision_offset);
            if previous_value <= value {
                rdtsc_last_value = value;
                rdtsc_imprecision_offset = 0
            }
            else {
                dbg_log!(
                    "XXX: Overshot tsc prev={:x}:{:x} offset={:x}:{:x} curr={:x}:{:x}",
                    (rdtsc_last_value >> 32) as u32 as i32,
                    rdtsc_last_value as u32 as i32,
                    (rdtsc_imprecision_offset >> 32) as u32 as i32,
                    rdtsc_imprecision_offset as u32 as i32,
                    (value >> 32) as u32 as i32,
                    value as u32 as i32
                );
                dbg_assert!(false);
                // Keep current value until time catches up
            }
        }
        return rdtsc_last_value.wrapping_add(rdtsc_imprecision_offset);
    };
}
#[no_mangle]
pub unsafe fn vm86_mode() -> bool { return *flags & FLAG_VM == FLAG_VM; }
#[no_mangle]
pub unsafe fn getiopl() -> i32 { return *flags >> 12 & 3; }
#[no_mangle]
pub unsafe fn get_opstats_buffer(
    compiled: bool,
    jit_exit: bool,
    unguarded_register: bool,
    wasm_size: bool,
    opcode: u8,
    is_0f: bool,
    is_mem: bool,
    fixed_g: u8,
) -> u32 {
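    // Pack the lookup key: bit 12 = 0F prefix, bits 4..=11 = opcode byte,
    // bit 3 = memory operand, bits 0..=2 = fixed_g (presumably the fixed
    // ModRM reg/group digit).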
    let index = (is_0f as u32) << 12 | (opcode as u32) << 4 | (is_mem as u32) << 3 | fixed_g as u32;
    if compiled {
        *opstats_compiled_buffer.offset(index as isize)
    }
    else if jit_exit {
        *opstats_jit_exit_buffer.offset(index as isize)
    }
    else if unguarded_register {
        *opstats_unguarded_register_buffer.offset(index as isize)
    }
    else if wasm_size {
        *opstats_wasm_size.offset(index as isize)
    }
    else {
        *opstats_buffer.offset(index as isize)
    }
}
pub unsafe fn invlpg(addr: i32) {
    let page = (addr as u32 >> 12) as i32;
    // Note: Doesn't remove this page from valid_tlb_entries: This isn't
    // necessary, because when valid_tlb_entries grows too large, it will be
    // emptied by calling clear_tlb, which removes this entry as it isn't global.
    // This however means that valid_tlb_entries can contain some invalid entries
    tlb_data[page as usize] = 0;
    *last_virt_eip = -1;
}
#[no_mangle]
pub unsafe fn update_eflags(new_flags: i32) {
    let mut dont_update: i32 = FLAG_RF | FLAG_VM | FLAG_VIP | FLAG_VIF;
    let mut clear: i32 = !FLAG_VIP & !FLAG_VIF & FLAGS_MASK;
    if 0 != *flags & FLAG_VM {
        // other case needs to be handled in popf or iret
        dbg_assert!(getiopl() == 3);
        dont_update |= FLAG_IOPL;
        // don't clear vip or vif
        clear |= FLAG_VIP | FLAG_VIF
    }
    else {
        if !*protected_mode {
            dbg_assert!(*cpl == 0);
        }
        if 0 != *cpl {
            // cpl > 0
            // cannot update iopl
            dont_update |= FLAG_IOPL;
            if *cpl as i32 > getiopl() {
                // cpl > iopl
                // cannot update interrupt flag
                dont_update |= FLAG_INTERRUPT
            }
        }
    }
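    // Bits in dont_update keep their old value, all other bits come from
    // new_flags; the result is then masked to the defined flag bits and the
    // always-set default bits are forced on.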
    *flags = (new_flags ^ (*flags ^ new_flags) & dont_update) & clear | FLAGS_DEFAULT;
    *flags_changed = 0;
    if *flags & FLAG_TRAP != 0 {
        dbg_log!("Not supported: trap flag");
    }
    *flags &= !FLAG_TRAP;
}
#[no_mangle]
pub unsafe fn get_valid_tlb_entries_count() -> i32 {
    if !cfg!(feature = "profiler") {
        return 0;
    }
    let mut result: i32 = 0;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        let entry = tlb_data[page as usize];
        if 0 != entry {
            result += 1
        }
    }
    return result;
}
#[no_mangle]
pub unsafe fn get_valid_global_tlb_entries_count() -> i32 {
    if !cfg!(feature = "profiler") {
        return 0;
    }
    let mut result: i32 = 0;
    for i in 0..valid_tlb_entries_count {
        let page = valid_tlb_entries[i as usize];
        let entry = tlb_data[page as usize];
        if 0 != entry & TLB_GLOBAL {
            result += 1
        }
    }
    return result;
}
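// On a TLB hit, the upper bits of a TLB entry appear to hold physical_page ^
// virtual_page, so XORing the masked entry with the full virtual address yields
// the physical address while preserving the page offset.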
pub unsafe fn translate_address_system_read(address: i32) -> OrPageFault<u32> {
    let entry = tlb_data[(address as u32 >> 12) as usize];
    if 0 != entry & TLB_VALID {
        return Ok((entry & !0xFFF ^ address) as u32);
    }
    else {
        return Ok((do_page_translation(address, false, false)? | address & 0xFFF) as u32);
    };
}
pub unsafe fn translate_address_system_write(address: i32) -> OrPageFault<u32> {
    let entry = tlb_data[(address as u32 >> 12) as usize];
    if entry & (TLB_VALID | TLB_READONLY) == TLB_VALID {
        return Ok((entry & !0xFFF ^ address) as u32);
    }
    else {
        return Ok((do_page_translation(address, true, false)? | address & 0xFFF) as u32);
    };
}
#[no_mangle]
pub unsafe fn trigger_np(code: i32) {
    dbg_log!("#np");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if cpu_exception_hook(CPU_EXCEPTION_NP) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_NP, false, Some(code));
}
#[no_mangle]
pub unsafe fn trigger_ss(code: i32) {
    dbg_log!("#ss");
    *instruction_pointer = *previous_ip;
    if DEBUG {
        if cpu_exception_hook(CPU_EXCEPTION_SS) {
            return;
        }
    }
    call_interrupt_vector(CPU_EXCEPTION_SS, false, Some(code));
}
#[no_mangle]
pub unsafe fn store_current_tsc() { *current_tsc = read_tsc(); }
#[no_mangle]
pub unsafe fn handle_irqs() {
    if *flags & FLAG_INTERRUPT != 0 {
        pic_acknowledge()
    }
}
#[no_mangle]
pub unsafe fn pic_call_irq(interrupt_nr: i32) {
    *previous_ip = *instruction_pointer; // XXX: What if called after instruction (port IO)
    call_interrupt_vector(interrupt_nr, false, None);
}