12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746 |
- use std::collections::{BTreeMap, HashMap, HashSet};
- use std::iter::FromIterator;
- use std::mem;
- use std::ptr::NonNull;
- use analysis::AnalysisType;
- use codegen;
- use cpu::cpu;
- use cpu::memory;
- use cpu_context::CpuContext;
- use global_pointers;
- use jit_instructions;
- use page::Page;
- use profiler;
- use profiler::stat;
- use state_flags::CachedStateFlags;
- use util::SafeToU16;
- use wasmgen::wasm_builder::{WasmBuilder, WasmLocal};
// Raw FFI bindings to the host (JavaScript) side of the JIT. Callers should
// use the safe wrappers defined below rather than these directly.
mod unsafe_jit {
    extern "C" {
        // Hands a finished wasm module (identified by its table index) to the
        // host for compilation; the host later calls codegen_finalize_finished.
        pub fn codegen_finalize(
            wasm_table_index: u16,
            phys_addr: u32,
            end_addr: u32,
            first_opcode: u32,
            state_flags: u32,
        );
        // Removes a single function from the host's wasm table.
        pub fn jit_clear_func(wasm_table_index: u16);
        // Removes every function from the host's wasm table.
        pub fn jit_clear_all_funcs();
    }
}
/// Safe wrapper around the host's `codegen_finalize`: submits the generated
/// wasm module for `wasm_table_index` to the host. `state_flags` is lowered
/// to its raw `u32` representation for the FFI boundary.
fn codegen_finalize(
    wasm_table_index: u16,
    phys_addr: u32,
    end_addr: u32,
    first_opcode: u32,
    state_flags: CachedStateFlags,
) {
    // SAFETY: only plain-data scalars cross the FFI boundary.
    unsafe {
        unsafe_jit::codegen_finalize(
            wasm_table_index,
            phys_addr,
            end_addr,
            first_opcode,
            state_flags.to_u32(),
        )
    }
}
/// Safe wrapper: removes the function at `wasm_table_index` from the host's
/// wasm table.
pub fn jit_clear_func(wasm_table_index: u16) {
    // SAFETY: plain scalar argument passed to the imported host function.
    unsafe { unsafe_jit::jit_clear_func(wasm_table_index) }
}
/// Safe wrapper: removes every function from the host's wasm table.
pub fn jit_clear_all_funcs() { unsafe { unsafe_jit::jit_clear_all_funcs() } }
// Number of slots in the host's wasm function table; index 0 is reserved as
// the "invalid" index, so at most WASM_TABLE_SIZE - 1 modules can be live.
pub const WASM_TABLE_SIZE: u32 = 900;
// Size of the JitState::hot_pages hash table; prime, to spread page numbers.
pub const HASH_PRIME: u32 = 6151;
// Expensive consistency checks over the whole cache array; see
// jit_cache_array::check_invariants. Off by default.
pub const CHECK_JIT_CACHE_ARRAY_INVARIANTS: bool = false;
pub const JIT_MAX_ITERATIONS_PER_FUNCTION: u32 = 10000;
pub const JIT_ALWAYS_USE_LOOP_SAFETY: bool = true;
// Hotness count a page must reach before it is compiled.
pub const JIT_THRESHOLD: u32 = 200 * 1000;
// Number of consecutive cache slots probed on lookup/insert (open addressing).
const CODE_CACHE_SEARCH_SIZE: u32 = 8;
// Maximum x86 instruction length in bytes; used for end-of-page checks.
const MAX_INSTRUCTION_LENGTH: u32 = 16;
#[allow(non_upper_case_globals)]
// Global pointer to the JIT state. Initialised to a dangling but aligned,
// non-null placeholder; it must not be dereferenced before rust_init runs.
static mut jit_state: NonNull<JitState> =
    unsafe { NonNull::new_unchecked(mem::align_of::<JitState>() as *mut _) };
// NOTE(review): soundness assumes rust_init has been called and there is no
// concurrent access (presumably a single-threaded wasm environment) — confirm.
pub fn get_jit_state() -> &'static mut JitState { unsafe { jit_state.as_mut() } }
- #[no_mangle]
- pub fn rust_init() {
- let x = Box::new(JitState::create_and_initialise());
- unsafe {
- jit_state = NonNull::new(Box::into_raw(x)).unwrap()
- }
- use std::panic;
- panic::set_hook(Box::new(|panic_info| {
- console_log!("{}", panic_info.to_string());
- }));
- }
// Open-addressed cache of compiled-code entries, keyed by physical address.
// Entries that live in the same physical page are additionally chained into a
// singly linked list rooted at `page_first_entry`, so a whole page can be
// invalidated quickly.
mod jit_cache_array {
    use page::Page;
    use state_flags::CachedStateFlags;
    // Note: For performance reasons, this is global state. See jit_find_cache_entry
    // Sentinel meaning "no further entry in this page's list".
    const NO_NEXT_ENTRY: u32 = 0xffff_ffff;
    // When changing this, you also need to bump global-base
    pub const SIZE: u32 = 0x40000;
    pub const MASK: u32 = SIZE - 1;
    // One slot of the cache. A slot is invalid iff start_addr == 0.
    #[derive(Copy, Clone)]
    pub struct Entry {
        pub start_addr: u32,
        #[cfg(any(debug_assertions, feature = "profiler"))]
        pub len: u32,
        #[cfg(debug_assertions)]
        pub opcode: u32,
        // an index into jit_cache_array for the next code_cache entry within the same physical page
        next_index_same_page: u32,
        pub initial_state: u16,
        pub wasm_table_index: u16,
        pub state_flags: CachedStateFlags,
        // true while the wasm module backing this entry is still compiling
        pub pending: bool,
    }
    impl Entry {
        // Builds an entry; `len`/`opcode` debug fields start zeroed and are
        // filled in by the caller where applicable.
        pub fn create(
            start_addr: u32,
            next_index_same_page: Option<u32>,
            wasm_table_index: u16,
            initial_state: u16,
            state_flags: CachedStateFlags,
            pending: bool,
        ) -> Entry {
            let next_index_same_page = next_index_same_page.unwrap_or(NO_NEXT_ENTRY);
            Entry {
                start_addr,
                next_index_same_page,
                wasm_table_index,
                initial_state,
                state_flags,
                pending,
                #[cfg(any(debug_assertions, feature = "profiler"))]
                len: 0,
                #[cfg(debug_assertions)]
                opcode: 0,
            }
        }
        // Successor in this page's linked list, or None at the tail.
        pub fn next_index_same_page(&self) -> Option<u32> {
            if self.next_index_same_page == NO_NEXT_ENTRY {
                None
            }
            else {
                Some(self.next_index_same_page)
            }
        }
        pub fn set_next_index_same_page(&mut self, next_index: Option<u32>) {
            if let Some(i) = next_index {
                self.next_index_same_page = i
            }
            else {
                self.next_index_same_page = NO_NEXT_ENTRY
            }
        }
    }
    // The invalid slot value used to (re)initialise the array.
    const DEFAULT_ENTRY: Entry = Entry {
        start_addr: 0,
        next_index_same_page: NO_NEXT_ENTRY,
        wasm_table_index: 0,
        initial_state: 0,
        state_flags: CachedStateFlags::EMPTY,
        pending: false,
        #[cfg(any(debug_assertions, feature = "profiler"))]
        len: 0,
        #[cfg(debug_assertions)]
        opcode: 0,
    };
    // Backing storage lives at a fixed linear-memory address shared with the
    // host; hence raw pointers instead of a Rust-owned array.
    #[allow(non_upper_case_globals)]
    pub const jit_cache_array: *mut Entry = ::global_pointers::JIT_CACHE_ARRAY as *mut Entry;
    // Compile-time checks that Entry has the size the host-side layout expects.
    #[allow(unreachable_code)]
    #[cfg(debug_assertions)]
    unsafe fn _static_assert() { std::mem::transmute::<Entry, [u8; 24]>(panic!()); }
    #[allow(unreachable_code)]
    #[cfg(all(not(debug_assertions), not(feature = "profiler")))]
    unsafe fn _static_assert() { std::mem::transmute::<Entry, [u8; 16]>(panic!()); }
    // XXX: Probably doesn't need to be statically allocated
    // Per-physical-page head index of the linked list of entries in that page.
    #[allow(non_upper_case_globals)]
    pub const page_first_entry: *mut u32 = ::global_pointers::JIT_PAGE_FIRST_ENTRY as *mut u32;
    pub fn get_page_index(page: Page) -> Option<u32> {
        let index = unsafe { *page_first_entry.offset(page.to_u32() as isize) };
        if index == NO_NEXT_ENTRY { None } else { Some(index) }
    }
    pub fn set_page_index(page: Page, index: Option<u32>) {
        let index = index.unwrap_or(NO_NEXT_ENTRY);
        unsafe { *page_first_entry.offset(page.to_u32() as isize) = index }
    }
    pub fn get(i: u32) -> &'static Entry { unsafe { &*jit_cache_array.offset(i as isize) } }
    pub fn get_mut(i: u32) -> &'static mut Entry {
        unsafe { &mut *jit_cache_array.offset(i as isize) }
    }
    fn set(i: u32, entry: Entry) {
        unsafe {
            *jit_cache_array.offset(i as isize) = entry
        };
    }
    // Stores `entry` at `index` and pushes it onto the front of its page's
    // linked list.
    pub fn insert(index: u32, mut entry: Entry) {
        let page = Page::page_of(entry.start_addr);
        let previous_entry_index = get_page_index(page);
        if let Some(previous_entry_index) = previous_entry_index {
            let previous_entry = get(previous_entry_index);
            if previous_entry.start_addr != 0 {
                dbg_assert!(
                    Page::page_of(previous_entry.start_addr) == Page::page_of(entry.start_addr)
                );
            }
        }
        set_page_index(page, Some(index));
        entry.set_next_index_same_page(previous_entry_index);
        set(index, entry);
    }
    // Unlinks the entry at `index` from its page's list (head or interior).
    // The slot itself is not cleared here, only its next-pointer.
    pub fn remove(index: u32) {
        let page = Page::page_of((get(index)).start_addr);
        let mut page_index = get_page_index(page);
        let mut did_remove = false;
        if page_index == Some(index) {
            set_page_index(page, (get(index)).next_index_same_page());
            did_remove = true;
        }
        else {
            // Walk the list to find the predecessor and splice the entry out.
            while let Some(page_index_ok) = page_index {
                let next_index = (get(page_index_ok)).next_index_same_page();
                if next_index == Some(index) {
                    (get_mut(page_index_ok))
                        .set_next_index_same_page((get(index)).next_index_same_page());
                    did_remove = true;
                    break;
                }
                page_index = next_index;
            }
        }
        (get_mut(index)).set_next_index_same_page(None);
        dbg_assert!(did_remove);
    }
    // Resets every cache slot and every page-list head.
    pub fn clear() {
        unsafe {
            for i in 0..SIZE {
                *jit_cache_array.offset(i as isize) = DEFAULT_ENTRY;
            }
            for i in 0..0x100000 {
                *page_first_entry.offset(i) = NO_NEXT_ENTRY;
            }
        }
    }
    // Debug-only full consistency check; gated on
    // ::jit::CHECK_JIT_CACHE_ARRAY_INVARIANTS (off by default).
    pub fn check_invariants() {
        if !::jit::CHECK_JIT_CACHE_ARRAY_INVARIANTS {
            return;
        }
        // there are no loops in the linked lists
        // https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_Tortoise_and_Hare
        for i in 0..(1 << 20) {
            let mut slow = get_page_index(Page::page_of(i << 12));
            let mut fast = slow;
            while let Some(fast_ok) = fast {
                fast = (get(fast_ok)).next_index_same_page();
                slow = (get(slow.unwrap())).next_index_same_page();
                if let Some(fast_ok) = fast {
                    fast = (get(fast_ok)).next_index_same_page();
                }
                else {
                    break;
                }
                dbg_assert!(slow != fast);
            }
        }
        // Remembers the first cache index seen for each wasm table index.
        let mut wasm_table_index_to_jit_cache_index = [0; ::jit::WASM_TABLE_SIZE as usize];
        for i in 0..SIZE {
            let entry = get(i);
            dbg_assert!(entry.next_index_same_page().map_or(true, |i| i < SIZE));
            if entry.pending {
                dbg_assert!(entry.start_addr != 0);
                dbg_assert!(entry.wasm_table_index != 0);
            }
            else {
                // an invalid entry has both its start_addr and wasm_table_index set to 0
                // neither start_addr nor wasm_table_index are 0 for any valid entry
                dbg_assert!((entry.start_addr == 0) == (entry.wasm_table_index == 0));
            }
            // having a next entry implies validity
            dbg_assert!(entry.next_index_same_page() == None || entry.start_addr != 0);
            // any valid wasm_table_index can only be used within a single page
            if entry.wasm_table_index != 0 {
                let j = wasm_table_index_to_jit_cache_index[entry.wasm_table_index as usize];
                if j != 0 {
                    let other_entry = get(j);
                    dbg_assert!(other_entry.wasm_table_index == entry.wasm_table_index);
                    dbg_assert!(
                        Page::page_of(other_entry.start_addr) == Page::page_of(entry.start_addr)
                    );
                }
                else {
                    wasm_table_index_to_jit_cache_index[entry.wasm_table_index as usize] = i as u32;
                }
            }
            if entry.start_addr != 0 {
                // valid entries can be reached from page_first_entry
                let mut reached = false;
                let page = Page::page_of(entry.start_addr);
                let mut cache_array_index = get_page_index(page);
                while let Some(index) = cache_array_index {
                    let other_entry = get(index);
                    if i as u32 == index {
                        reached = true;
                        break;
                    }
                    cache_array_index = other_entry.next_index_same_page();
                }
                dbg_assert!(reached);
            }
        }
    }
}
// Mutable state of the JIT, reachable through the `jit_state` global.
pub struct JitState {
    // as an alternative to HashSet, we could use a bitmap of 4096 bits here
    // (faster, but uses much more memory)
    // or a compressed bitmap (likely faster)
    // Hit counters per page-hash bucket (see jit_hot_hash_page / JIT_THRESHOLD).
    hot_pages: [u32; HASH_PRIME as usize],
    // Wasm table indices available for newly generated modules.
    wasm_table_index_free_list: Vec<u16>,
    // Indices whose modules were invalidated while still compiling; they are
    // returned to the free list once compilation finishes.
    wasm_table_index_pending_free: Vec<u16>,
    // Recorded entry points (page offsets) per page, not yet compiled.
    entry_points: HashMap<Page, HashSet<u16>>,
    wasm_builder: WasmBuilder,
}
- impl JitState {
- pub fn create_and_initialise() -> JitState {
- jit_cache_array::clear();
- // don't assign 0 (XXX: Check)
- let wasm_table_indices = 1..=(WASM_TABLE_SIZE - 1) as u16;
- JitState {
- hot_pages: [0; HASH_PRIME as usize],
- wasm_table_index_free_list: Vec::from_iter(wasm_table_indices),
- wasm_table_index_pending_free: vec![],
- entry_points: HashMap::new(),
- wasm_builder: WasmBuilder::new(),
- }
- }
- }
// How control leaves a basic block.
#[derive(PartialEq, Eq)]
enum BasicBlockType {
    // Falls through (or jumps unconditionally) to a single successor block.
    Normal {
        next_block_addr: u32,
    },
    // Two-way branch; either successor is None when it would leave the page
    // or land too close to the page end to be compiled.
    ConditionalJump {
        next_block_addr: Option<u32>,
        next_block_branch_taken_addr: Option<u32>,
        condition: u8,
        jump_offset: i32,
        jump_offset_is_32: bool,
    },
    // Leaves generated code entirely (e.g. jump out of page, block boundary).
    Exit,
}
// A run of instructions with a single entry at `addr` and a single exit,
// discovered by jit_find_basic_blocks. `end_addr` is exclusive (the address
// after the last instruction).
struct BasicBlock {
    addr: u32,
    last_instruction_addr: u32,
    end_addr: u32,
    // true when this block is a recorded entry point into the page
    is_entry_block: bool,
    ty: BasicBlockType,
    // block ends right after an STI instruction (interrupt-window handling)
    has_sti: bool,
    number_of_instructions: u32,
}
// Result of a cache lookup, returned across the FFI boundary (#[repr(C)]):
// which wasm table slot to call and which internal state to start in.
#[repr(C)]
#[derive(Copy, Clone, PartialEq)]
pub struct cached_code {
    pub wasm_table_index: u16,
    pub initial_state: u16,
}
impl cached_code {
    // The miss value; wasm table index 0 is never allocated to real code.
    pub const NONE: cached_code = cached_code {
        wasm_table_index: 0,
        initial_state: 0,
    };
}
- pub struct JitContext<'a> {
- pub cpu: &'a mut CpuContext,
- pub builder: &'a mut WasmBuilder,
- pub register_locals: &'a mut Vec<WasmLocal>,
- pub start_of_current_instruction: u32,
- pub current_brtable_depth: u32,
- pub our_wasm_table_index: u16,
- pub basic_block_index_local: &'a WasmLocal,
- pub state_flags: CachedStateFlags,
- }
// Flag bit reported by instruction analysis: instruction ends a basic block.
pub const JIT_INSTR_BLOCK_BOUNDARY_FLAG: u32 = 1 << 0;
- fn jit_hot_hash_page(page: Page) -> u32 { page.to_u32() % HASH_PRIME }
- fn is_near_end_of_page(address: u32) -> bool { address & 0xFFF >= 0x1000 - MAX_INSTRUCTION_LENGTH }
/// Looks up compiled code for `phys_address` under `state_flags`.
/// Linearly probes CODE_CACHE_SEARCH_SIZE consecutive slots; returns
/// `cached_code::NONE` on miss and records (via the profiler) why the
/// interpreter has to run instead.
pub fn jit_find_cache_entry(phys_address: u32, state_flags: CachedStateFlags) -> cached_code {
    if is_near_end_of_page(phys_address) {
        profiler::stat_increment(stat::RUN_INTERPRETED_NEAR_END_OF_PAGE);
    }
    let mut run_interpreted_reason = None;
    for i in 0..CODE_CACHE_SEARCH_SIZE {
        let index = (phys_address + i) & jit_cache_array::MASK;
        let entry = jit_cache_array::get(index);
        // Track near-misses (right address, wrong state) for profiling only.
        if entry.start_addr == phys_address {
            if entry.pending {
                run_interpreted_reason = Some(stat::RUN_INTERPRETED_PENDING)
            }
            if entry.state_flags != state_flags {
                run_interpreted_reason = Some(stat::RUN_INTERPRETED_DIFFERENT_STATE)
            }
        }
        // Code near the page end is never compiled, so it must never be found.
        if is_near_end_of_page(phys_address) {
            dbg_assert!(entry.start_addr != phys_address);
        }
        if !entry.pending && entry.start_addr == phys_address && entry.state_flags == state_flags {
            #[cfg(debug_assertions)] // entry.opcode is not defined otherwise
            {
                // Detect stale entries whose underlying code changed.
                dbg_assert!(memory::read32s(entry.start_addr) as u32 == entry.opcode);
            }
            return cached_code {
                wasm_table_index: entry.wasm_table_index,
                initial_state: entry.initial_state,
            };
        }
    }
    if let Some(reason) = run_interpreted_reason {
        profiler::stat_increment(reason);
    }
    cached_code::NONE
}
/// Like `jit_find_cache_entry`, but additionally requires the entry to belong
/// to the given `wasm_table_index` (used for intra-module dispatch). Returns
/// the entry's initial state on hit, or -1 on miss.
#[no_mangle]
pub fn jit_find_cache_entry_in_page(
    virt_eip: i32,
    phys_eip: u32,
    wasm_table_index: u16,
    state_flags: u32,
) -> i32 {
    let state_flags = CachedStateFlags::of_u32(state_flags);
    // Combine the in-page offset from the virtual address with the page bits
    // of the physical address.
    let phys_address = virt_eip as u32 & 0xFFF | phys_eip & !0xFFF;
    for i in 0..CODE_CACHE_SEARCH_SIZE {
        let index = (phys_address + i) & jit_cache_array::MASK;
        let entry = jit_cache_array::get(index);
        if is_near_end_of_page(phys_address) {
            dbg_assert!(entry.start_addr != phys_address);
        }
        if !entry.pending
            && entry.start_addr == phys_address
            && entry.state_flags == state_flags
            && entry.wasm_table_index == wasm_table_index
        {
            #[cfg(debug_assertions)] // entry.opcode is not defined otherwise
            {
                dbg_assert!(memory::read32s(entry.start_addr) as u32 == entry.opcode);
            }
            // Disabled debug logging; flip to true when tracing dispatch.
            if false {
                dbg_log!(
                    "jit_find_cache_entry_in_page hit {:x} {:x}",
                    virt_eip as u32,
                    phys_eip,
                );
            }
            return entry.initial_state as i32;
        }
    }
    if false {
        dbg_log!(
            "jit_find_cache_entry_in_page miss {:x} {:x}",
            virt_eip as u32,
            phys_eip,
        );
    }
    return -1;
}
- pub fn record_entry_point(phys_address: u32) {
- let ctx = get_jit_state();
- if is_near_end_of_page(phys_address) {
- return;
- }
- let page = Page::page_of(phys_address);
- let offset_in_page = phys_address as u16 & 0xFFF;
- let mut is_new = false;
- ctx.entry_points
- .entry(page)
- .or_insert_with(|| {
- is_new = true;
- HashSet::new()
- })
- .insert(offset_in_page);
- if is_new {
- cpu::tlb_set_has_code(page, true);
- }
- }
- fn jit_find_basic_blocks(
- page: Page,
- entry_points: &HashSet<u16>,
- cpu: CpuContext,
- ) -> (Vec<BasicBlock>, bool) {
- let mut to_visit_stack: Vec<u16> = entry_points.iter().cloned().collect();
- let mut marked_as_entry: HashSet<u16> = entry_points.clone();
- let page_high_bits = page.to_address();
- let mut basic_blocks: BTreeMap<u32, BasicBlock> = BTreeMap::new();
- let mut requires_loop_limit = false;
- while let Some(to_visit_offset) = to_visit_stack.pop() {
- let to_visit = to_visit_offset as u32 | page_high_bits;
- if basic_blocks.contains_key(&to_visit) {
- continue;
- }
- if is_near_end_of_page(to_visit) {
- // Empty basic block, don't insert
- profiler::stat_increment(stat::COMPILE_CUT_OFF_AT_END_OF_PAGE);
- continue;
- }
- let mut current_address = to_visit;
- let mut current_block = BasicBlock {
- addr: current_address,
- last_instruction_addr: 0,
- end_addr: 0,
- ty: BasicBlockType::Exit,
- is_entry_block: false,
- has_sti: false,
- number_of_instructions: 0,
- };
- loop {
- let addr_before_instruction = current_address;
- let mut ctx = &mut CpuContext {
- eip: current_address,
- ..cpu
- };
- let analysis = ::analysis::analyze_step(&mut ctx);
- current_block.number_of_instructions += 1;
- let has_next_instruction = !analysis.no_next_instruction;
- current_address = ctx.eip;
- match analysis.ty {
- AnalysisType::Normal | AnalysisType::STI => {
- dbg_assert!(has_next_instruction);
- if current_block.has_sti {
- // Convert next instruction after STI (i.e., the current instruction) into block boundary
- marked_as_entry.insert(current_address as u16 & 0xFFF);
- to_visit_stack.push(current_address as u16 & 0xFFF);
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- break;
- }
- if analysis.ty == AnalysisType::STI {
- current_block.has_sti = true;
- dbg_assert!(
- !is_near_end_of_page(current_address),
- "TODO: Handle STI instruction near end of page"
- );
- }
- else {
- // Only split non-STI blocks (one instruction needs to run after STI before
- // handle_irqs may be called)
- if basic_blocks.contains_key(¤t_address) {
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- dbg_assert!(!is_near_end_of_page(current_address));
- current_block.ty = BasicBlockType::Normal {
- next_block_addr: current_address,
- };
- break;
- }
- }
- },
- AnalysisType::Jump {
- offset,
- is_32,
- condition: Some(condition),
- } => {
- // conditional jump: continue at next and continue at jump target
- let jump_target = if is_32 {
- current_address.wrapping_add(offset as u32)
- }
- else {
- ctx.cs_offset.wrapping_add(
- (current_address
- .wrapping_sub(ctx.cs_offset)
- .wrapping_add(offset as u32))
- & 0xFFFF,
- )
- };
- dbg_assert!(has_next_instruction);
- to_visit_stack.push(current_address as u16 & 0xFFF);
- let next_block_branch_taken_addr;
- if Page::page_of(jump_target) == page && !is_near_end_of_page(jump_target) {
- to_visit_stack.push(jump_target as u16 & 0xFFF);
- next_block_branch_taken_addr = Some(jump_target);
- // Very simple heuristic for "infinite loops": This
- // detects Linux's "calibrating delay loop"
- if jump_target == current_block.addr {
- dbg_log!("Basic block looping back to front");
- requires_loop_limit = true;
- }
- }
- else {
- next_block_branch_taken_addr = None;
- }
- let next_block_addr = if is_near_end_of_page(current_address) {
- None
- }
- else {
- Some(current_address)
- };
- current_block.ty = BasicBlockType::ConditionalJump {
- next_block_addr,
- next_block_branch_taken_addr,
- condition,
- jump_offset: offset,
- jump_offset_is_32: is_32,
- };
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- break;
- },
- AnalysisType::Jump {
- offset,
- is_32,
- condition: None,
- } => {
- // non-conditional jump: continue at jump target
- let jump_target = if is_32 {
- current_address.wrapping_add(offset as u32)
- }
- else {
- ctx.cs_offset.wrapping_add(
- (current_address
- .wrapping_sub(ctx.cs_offset)
- .wrapping_add(offset as u32))
- & 0xFFFF,
- )
- };
- if has_next_instruction {
- // Execution will eventually come back to the next instruction (CALL)
- marked_as_entry.insert(current_address as u16 & 0xFFF);
- to_visit_stack.push(current_address as u16 & 0xFFF);
- }
- if Page::page_of(jump_target) == page && !is_near_end_of_page(jump_target) {
- current_block.ty = BasicBlockType::Normal {
- next_block_addr: jump_target,
- };
- to_visit_stack.push(jump_target as u16 & 0xFFF);
- }
- else {
- current_block.ty = BasicBlockType::Exit;
- }
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- break;
- },
- AnalysisType::BlockBoundary => {
- // a block boundary but not a jump, get out
- if has_next_instruction {
- // block boundary, but execution will eventually come back
- // to the next instruction. Create a new basic block
- // starting at the next instruction and register it as an
- // entry point
- marked_as_entry.insert(current_address as u16 & 0xFFF);
- to_visit_stack.push(current_address as u16 & 0xFFF);
- }
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- break;
- },
- }
- if is_near_end_of_page(current_address) {
- current_block.last_instruction_addr = addr_before_instruction;
- current_block.end_addr = current_address;
- profiler::stat_increment(stat::COMPILE_CUT_OFF_AT_END_OF_PAGE);
- break;
- }
- }
- let previous_block = basic_blocks
- .range(..current_block.addr)
- .next_back()
- .filter(|(_, previous_block)| (!previous_block.has_sti))
- .map(|(_, previous_block)| (previous_block.addr, previous_block.end_addr));
- if let Some((start_addr, end_addr)) = previous_block {
- if current_block.addr < end_addr {
- // If this block overlaps with the previous block, re-analyze the previous block
- let old_block = basic_blocks.remove(&start_addr);
- dbg_assert!(old_block.is_some());
- to_visit_stack.push(start_addr as u16 & 0xFFF);
- // Note that this does not ensure the invariant that two consecutive blocks don't
- // overlay. For that, we also need to check the following block.
- }
- }
- dbg_assert!(current_block.addr < current_block.end_addr);
- dbg_assert!(current_block.addr <= current_block.last_instruction_addr);
- dbg_assert!(current_block.last_instruction_addr < current_block.end_addr);
- basic_blocks.insert(current_block.addr, current_block);
- }
- for block in basic_blocks.values_mut() {
- if marked_as_entry.contains(&(block.addr as u16 & 0xFFF)) {
- block.is_entry_block = true;
- }
- }
- let basic_blocks: Vec<BasicBlock> = basic_blocks.into_iter().map(|(_, block)| block).collect();
- for i in 0..basic_blocks.len() - 1 {
- let next_block_addr = basic_blocks[i + 1].addr;
- let next_block_end_addr = basic_blocks[i + 1].end_addr;
- let next_block_is_entry = basic_blocks[i + 1].is_entry_block;
- let block = &basic_blocks[i];
- dbg_assert!(block.addr < next_block_addr);
- if next_block_addr < block.end_addr {
- dbg_log!(
- "Overlapping first=[from={:x} to={:x} is_entry={}] second=[from={:x} to={:x} is_entry={}]",
- block.addr,
- block.end_addr,
- block.is_entry_block as u8,
- next_block_addr,
- next_block_end_addr,
- next_block_is_entry as u8
- );
- }
- }
- (basic_blocks, requires_loop_limit)
- }
/// Inserts `entry` into the cache array, probing CODE_CACHE_SEARCH_SIZE slots
/// for a free one. When all probed slots are occupied, the entry at the home
/// slot is evicted — either just that entry (when it belongs to the same wasm
/// module as the new one) or the evicted entry's entire module.
fn create_cache_entry(ctx: &mut JitState, entry: jit_cache_array::Entry) {
    let mut found_entry_index = None;
    let phys_addr = entry.start_addr;
    for i in 0..CODE_CACHE_SEARCH_SIZE {
        let addr_index = (phys_addr + i) & jit_cache_array::MASK;
        let existing_entry = jit_cache_array::get(addr_index);
        if existing_entry.start_addr == entry.start_addr
            && existing_entry.state_flags == entry.state_flags
        {
            profiler::stat_increment(stat::COMPILE_DUPLICATE_ENTRY);
        }
        if existing_entry.start_addr == 0 {
            found_entry_index = Some(addr_index);
            break;
        }
    }
    let found_entry_index = match found_entry_index {
        Some(i) => i,
        None => {
            profiler::stat_increment(stat::CACHE_MISMATCH);
            // no free slots, overwrite the first one
            let found_entry_index = phys_addr & jit_cache_array::MASK;
            let old_entry = jit_cache_array::get_mut(found_entry_index);
            // if we're here, we expect to overwrite a valid index
            dbg_assert!(old_entry.start_addr != 0);
            dbg_assert!(old_entry.wasm_table_index != 0);
            if old_entry.wasm_table_index == entry.wasm_table_index {
                profiler::stat_increment(stat::INVALIDATE_SINGLE_ENTRY_CACHE_FULL);
                dbg_assert!(old_entry.pending);
                dbg_assert!(Page::page_of(old_entry.start_addr) == Page::page_of(phys_addr));
                // The old entry belongs to the same wasm table index as this entry.
                // *Don't* free the wasm table index, instead just delete the old entry
                // and use its slot for this entry.
                // TODO: Optimally, we should pick another slot instead of dropping
                // an entry has just been created.
                jit_cache_array::remove(found_entry_index);
                dbg_assert!(old_entry.next_index_same_page() == None);
                old_entry.pending = false;
                old_entry.start_addr = 0;
            }
            else {
                profiler::stat_increment(stat::INVALIDATE_MODULE_CACHE_FULL);
                // Evict the whole module the displaced entry belongs to.
                let old_wasm_table_index = old_entry.wasm_table_index;
                let old_page = Page::page_of(old_entry.start_addr);
                remove_jit_cache_wasm_index(ctx, old_page, old_wasm_table_index);
                //jit_cache_array::check_invariants();
                // old entry should be removed after calling remove_jit_cache_wasm_index
                dbg_assert!(!old_entry.pending);
                dbg_assert!(old_entry.start_addr == 0);
                dbg_assert!(old_entry.wasm_table_index == 0);
                dbg_assert!(old_entry.next_index_same_page() == None);
            }
            found_entry_index
        },
    };
    jit_cache_array::insert(found_entry_index, entry);
}
/// Debug-build helper: immediately records `phys_addr` as an entry point and
/// compiles its page, bypassing the usual hotness threshold.
#[no_mangle]
#[cfg(debug_assertions)]
pub fn jit_force_generate_unsafe(phys_addr: u32) {
    let ctx = get_jit_state();
    record_entry_point(phys_addr);
    let cs_offset = cpu::get_seg_cs() as u32;
    let state_flags = cpu::pack_current_state_flags();
    jit_analyze_and_generate(ctx, Page::page_of(phys_addr), cs_offset, state_flags);
}
/// Compiles all recorded entry points of `page` into one wasm module:
/// finds basic blocks, generates the module, creates (pending) cache entries
/// for each entry block, and submits the module to the host for asynchronous
/// compilation. No-op when the page has no entry points or already has a
/// compilation in flight.
#[inline(never)]
fn jit_analyze_and_generate(
    ctx: &mut JitState,
    page: Page,
    cs_offset: u32,
    state_flags: CachedStateFlags,
) {
    if jit_page_has_pending_code(ctx, page) {
        return;
    }
    // Consume the page's entry points; they are either compiled now or lost.
    let entry_points = ctx.entry_points.remove(&page);
    if let Some(entry_points) = entry_points {
        dbg_log!("Compile code for page at {:x}", page.to_address());
        profiler::stat_increment(stat::COMPILE);
        let cpu = CpuContext {
            eip: 0,
            prefixes: 0,
            cs_offset,
            state_flags,
        };
        let (basic_blocks, requires_loop_limit) =
            jit_find_basic_blocks(page, &entry_points, cpu.clone());
        //for b in basic_blocks.iter() {
        //    dbg_log!(
        //        "> Basic block from {:x} to {:x}, is_entry={}",
        //        b.addr,
        //        b.end_addr,
        //        b.is_entry_block
        //    );
        //}
        if ctx.wasm_table_index_free_list.is_empty() {
            dbg_log!(
                "wasm_table_index_free_list empty ({} pending_free), clearing cache",
                ctx.wasm_table_index_pending_free.len(),
            );
            // When no free slots are available, delete all cached modules. We could increase the
            // size of the table, but this way the initial size acts as an upper bound for the
            // number of wasm modules that we generate, which we want anyway to avoid getting our
            // tab killed by browsers due to memory constraints.
            jit_clear_cache(ctx);
            profiler::stat_increment(stat::INVALIDATE_ALL_MODULES_NO_FREE_WASM_INDICES);
            dbg_log!(
                "after jit_clear_cache: {} pending_free {} free",
                ctx.wasm_table_index_pending_free.len(),
                ctx.wasm_table_index_free_list.len(),
            );
            // This assertion can fail if all entries are pending (not possible unless
            // WASM_TABLE_SIZE is set very low)
            dbg_assert!(!ctx.wasm_table_index_free_list.is_empty());
        }
        // allocate an index in the wasm table
        let wasm_table_index = ctx
            .wasm_table_index_free_list
            .pop()
            .expect("allocate wasm table index");
        dbg_assert!(wasm_table_index != 0);
        jit_generate_module(
            &basic_blocks,
            requires_loop_limit,
            cpu.clone(),
            &mut ctx.wasm_builder,
            wasm_table_index,
            state_flags,
        );
        // create entries for each basic block that is marked as an entry point
        let mut entry_point_count = 0;
        for (i, block) in basic_blocks.iter().enumerate() {
            profiler::stat_increment(stat::COMPILE_BASIC_BLOCK);
            if block.is_entry_block && block.addr != block.end_addr {
                dbg_assert!(block.addr != 0);
                // The block's position in the list doubles as the module's
                // initial dispatch state.
                let initial_state = i.safe_to_u16();
                #[allow(unused_mut)]
                let mut entry = jit_cache_array::Entry::create(
                    block.addr,
                    None, // to be filled in by create_cache_entry
                    wasm_table_index,
                    initial_state,
                    state_flags,
                    true,
                );
                #[cfg(any(debug_assertions, feature = "profiler"))]
                {
                    entry.len = block.end_addr - block.addr;
                }
                #[cfg(debug_assertions)]
                {
                    entry.opcode = memory::read32s(block.addr) as u32;
                }
                create_cache_entry(ctx, entry);
                entry_point_count += 1;
                profiler::stat_increment(stat::COMPILE_ENTRY_POINT);
            }
        }
        profiler::stat_increment_by(stat::COMPILE_WASM_TOTAL_BYTES, jit_get_op_len() as u64);
        dbg_assert!(entry_point_count > 0);
        cpu::tlb_set_has_code(page, true);
        jit_cache_array::check_invariants();
        cpu::check_tlb_invariants();
        let end_addr = 0;
        let first_opcode = 0;
        let phys_addr = page.to_address();
        // will call codegen_finalize_finished asynchronously when finished
        codegen_finalize(
            wasm_table_index,
            phys_addr,
            end_addr,
            first_opcode,
            state_flags,
        );
        profiler::stat_increment(stat::COMPILE_SUCCESS);
    }
    else {
        //dbg_log("No basic blocks, not generating code");
        // Nothing to do
    }
}
- #[no_mangle]
- pub fn codegen_finalize_finished(
- wasm_table_index: u16,
- phys_addr: u32,
- _end_addr: u32,
- _first_opcode: u32,
- _state_flags: CachedStateFlags,
- ) {
- let ctx = get_jit_state();
- dbg_assert!(wasm_table_index != 0);
- match ctx
- .wasm_table_index_pending_free
- .iter()
- .position(|i| *i == wasm_table_index)
- {
- Some(i) => {
- ctx.wasm_table_index_pending_free.swap_remove(i);
- free_wasm_table_index(ctx, wasm_table_index);
- },
- None => {
- let page = Page::page_of(phys_addr);
- let mut cache_array_index = jit_cache_array::get_page_index(page);
- while let Some(index) = cache_array_index {
- let mut entry = jit_cache_array::get_mut(index);
- if (*entry).wasm_table_index == wasm_table_index {
- dbg_assert!((*entry).pending);
- (*entry).pending = false;
- }
- cache_array_index = (*entry).next_index_same_page();
- }
- },
- }
- jit_cache_array::check_invariants();
- if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
- // sanity check that the above iteration marked all entries as not pending
- for i in 0..jit_cache_array::SIZE {
- let entry = jit_cache_array::get(i);
- if entry.wasm_table_index == wasm_table_index {
- dbg_assert!(!entry.pending);
- }
- }
- }
- }
// Emit one wasm module containing all basic blocks of a page, structured as a
// state machine: an outer loop dispatches (via br_table) on a `state` local to
// the basic block to execute next. The wasm emission below is strictly
// order-dependent — each builder call appends to the instruction stream.
fn jit_generate_module(
    basic_blocks: &Vec<BasicBlock>,
    requires_loop_limit: bool,
    mut cpu: CpuContext,
    builder: &mut WasmBuilder,
    wasm_table_index: u16,
    state_flags: CachedStateFlags,
) {
    builder.reset();

    // Map each basic block's start address to its index in `basic_blocks`,
    // used to resolve jump targets into br_table case numbers
    let basic_block_indices: HashMap<u32, u32> = basic_blocks
        .iter()
        .enumerate()
        .map(|(index, block)| (block.addr, index as u32))
        .collect();

    // set state local variable to the initial state passed as the first argument
    builder.get_local(&builder.arg_local_initial_state.unsafe_clone());
    let gen_local_state = builder.set_new_local();

    // initialise max_iterations
    let gen_local_iteration_counter = if JIT_ALWAYS_USE_LOOP_SAFETY || requires_loop_limit {
        builder.const_i32(JIT_MAX_ITERATIONS_PER_FUNCTION as i32);
        Some(builder.set_new_local())
    }
    else {
        None
    };

    // Load the eight 32-bit registers from memory into wasm locals so that the
    // generated code can use them without memory accesses; they are written
    // back (gen_move_registers_from_locals_to_memory) before every exit
    let mut register_locals = (0..8)
        .map(|i| {
            builder.const_i32(global_pointers::get_reg32_offset(i) as i32);
            builder.load_aligned_i32(0);
            let local = builder.set_new_local();
            local
        })
        .collect();

    let ctx = &mut JitContext {
        cpu: &mut cpu,
        builder,
        register_locals: &mut register_locals,
        start_of_current_instruction: 0,
        current_brtable_depth: 0,
        our_wasm_table_index: wasm_table_index,
        basic_block_index_local: &gen_local_state,
        state_flags,
    };

    // main state machine loop
    ctx.builder.loop_void();

    if let Some(gen_local_iteration_counter) = gen_local_iteration_counter.as_ref() {
        profiler::stat_increment(stat::COMPILE_WITH_LOOP_SAFETY);

        // decrement max_iterations
        ctx.builder.get_local(gen_local_iteration_counter);
        ctx.builder.const_i32(-1);
        ctx.builder.add_i32();
        ctx.builder.set_local(gen_local_iteration_counter);

        // if max_iterations == 0: return
        ctx.builder.get_local(gen_local_iteration_counter);
        ctx.builder.eqz_i32();
        ctx.builder.if_void();
        codegen::gen_debug_track_jit_exit(ctx.builder, 0);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        ctx.builder.return_();
        ctx.builder.block_end();
    }

    ctx.builder.block_void(); // for the default case
    ctx.builder.block_void(); // for the exit-with-pagefault case

    // generate the opening blocks for the cases
    for _ in 0..basic_blocks.len() {
        ctx.builder.block_void();
    }

    // dispatch on the state local; case i branches to the label whose
    // block_end immediately precedes basic block i's code
    ctx.builder.get_local(&gen_local_state);
    ctx.builder.brtable_and_cases(basic_blocks.len() as u32 + 1); // plus one for the exit-with-pagefault case

    for (i, block) in basic_blocks.iter().enumerate() {
        // Case [i] will jump after the [i]th block, so we first generate the
        // block end opcode and then the code for that block
        ctx.builder.block_end();

        // branch depth from inside this block's body back to the outer loop
        ctx.current_brtable_depth = basic_blocks.len() as u32 + 1 - i as u32;

        dbg_assert!(block.addr < block.end_addr);

        jit_generate_basic_block(ctx, block);

        // jit_generate_basic_block must leave cpu.eip exactly at end_addr
        let invalid_connection_to_next_block = block.end_addr != ctx.cpu.eip;
        dbg_assert!(!invalid_connection_to_next_block);

        if block.has_sti {
            // Block ended in sti: update eip for a conditional jump if there
            // is one, then write back registers, call handle_irqs and leave
            // the generated code
            match block.ty {
                BasicBlockType::ConditionalJump {
                    condition,
                    jump_offset,
                    jump_offset_is_32,
                    ..
                } => {
                    codegen::gen_condition_fn(ctx, condition);
                    ctx.builder.if_void();
                    if jump_offset_is_32 {
                        codegen::gen_relative_jump(ctx.builder, jump_offset);
                    }
                    else {
                        codegen::gen_jmp_rel16(ctx.builder, jump_offset as u16);
                    }
                    ctx.builder.block_end();
                },
                _ => {},
            };

            codegen::gen_debug_track_jit_exit(ctx.builder, block.last_instruction_addr);
            codegen::gen_move_registers_from_locals_to_memory(ctx);
            codegen::gen_fn0_const(ctx.builder, "handle_irqs");
            ctx.builder.return_();
            continue;
        }

        match &block.ty {
            BasicBlockType::Exit => {
                // Exit this function
                codegen::gen_debug_track_jit_exit(ctx.builder, block.last_instruction_addr);
                codegen::gen_move_registers_from_locals_to_memory(ctx);
                ctx.builder.return_();
            },
            BasicBlockType::Normal { next_block_addr } => {
                // Unconditional jump to next basic block
                // - All instructions that don't change eip
                // - Unconditional jump
                let next_basic_block_index = *basic_block_indices
                    .get(&next_block_addr)
                    .expect("basic_block_indices.get (Normal)");

                if next_basic_block_index == (i as u32) + 1 {
                    // fallthru
                }
                else {
                    // set state variable to next basic block
                    ctx.builder.const_i32(next_basic_block_index as i32);
                    ctx.builder.set_local(&gen_local_state);

                    ctx.builder.br(ctx.current_brtable_depth); // to the loop
                }
            },
            &BasicBlockType::ConditionalJump {
                next_block_addr,
                next_block_branch_taken_addr,
                condition,
                jump_offset,
                jump_offset_is_32,
            } => {
                // Conditional jump to next basic block
                // - jnz, jc, loop, jcxz, etc.
                codegen::gen_condition_fn(ctx, condition);
                ctx.builder.if_void();

                // Branch taken

                if jump_offset_is_32 {
                    codegen::gen_relative_jump(ctx.builder, jump_offset);
                }
                else {
                    codegen::gen_jmp_rel16(ctx.builder, jump_offset as u16);
                }

                if let Some(next_block_branch_taken_addr) = next_block_branch_taken_addr {
                    let next_basic_block_branch_taken_index = *basic_block_indices
                        .get(&next_block_branch_taken_addr)
                        .expect("basic_block_indices.get (branch taken)");

                    ctx.builder
                        .const_i32(next_basic_block_branch_taken_index as i32);
                    ctx.builder.set_local(&gen_local_state);

                    // depth is current_brtable_depth + 1: we are inside the
                    // if_void opened above
                    ctx.builder.br(basic_blocks.len() as u32 + 2 - i as u32); // to the loop
                }
                else {
                    // Jump to different page
                    codegen::gen_debug_track_jit_exit(ctx.builder, block.last_instruction_addr);
                    codegen::gen_move_registers_from_locals_to_memory(ctx);
                    ctx.builder.return_();
                }

                if let Some(next_block_addr) = next_block_addr {
                    // Branch not taken
                    let next_basic_block_index = *basic_block_indices
                        .get(&next_block_addr)
                        .expect("basic_block_indices.get (branch not taken)");

                    if next_basic_block_index == (i as u32) + 1 {
                        // fallthru
                        ctx.builder.block_end();
                    }
                    else {
                        ctx.builder.else_();

                        ctx.builder.const_i32(next_basic_block_index as i32);
                        ctx.builder.set_local(&gen_local_state);

                        ctx.builder.br(basic_blocks.len() as u32 + 2 - i as u32); // to the loop

                        ctx.builder.block_end();
                    }
                }
                else {
                    ctx.builder.else_();

                    // End of this page
                    codegen::gen_debug_track_jit_exit(ctx.builder, block.last_instruction_addr);
                    codegen::gen_move_registers_from_locals_to_memory(ctx);
                    ctx.builder.return_();

                    ctx.builder.block_end();
                }
            },
        }
    }

    {
        // exit-with-pagefault case
        ctx.builder.block_end();
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        codegen::gen_fn0_const(ctx.builder, "trigger_pagefault_end_jit");
        ctx.builder.return_();
    }

    ctx.builder.block_end(); // default case
    ctx.builder.unreachable();

    ctx.builder.block_end(); // loop

    // release all locals allocated above so the builder can reuse them
    ctx.builder.free_local(gen_local_state.unsafe_clone());
    if let Some(local) = gen_local_iteration_counter {
        ctx.builder.free_local(local);
    }
    for local in ctx.register_locals.drain(..) {
        ctx.builder.free_local(local);
    }

    ctx.builder.finish();
}
// Generate wasm code for a single basic block by decoding instructions one at
// a time from block.addr until block.end_addr. jit_instruction advances
// ctx.cpu.eip past each decoded instruction; on return, ctx.cpu.eip equals
// block.end_addr (asserted by the caller).
fn jit_generate_basic_block(ctx: &mut JitContext, block: &BasicBlock) {
    let start_addr = block.addr;
    let last_instruction_addr = block.last_instruction_addr;
    let stop_addr = block.end_addr;

    // First iteration of do-while assumes the caller confirms this condition
    dbg_assert!(!is_near_end_of_page(start_addr));

    codegen::gen_increment_timestamp_counter(ctx.builder, block.number_of_instructions as i32);

    ctx.cpu.eip = start_addr;

    loop {
        // opcode dword at the current address, recorded for opstats in
        // profiler builds only
        let mut instruction = 0;
        if cfg!(feature = "profiler") {
            instruction = memory::read32s(ctx.cpu.eip) as u32;
            ::opstats::gen_opstats(ctx.builder, instruction);
            ::opstats::record_opstat_compiled(instruction);
        }
        if ctx.cpu.eip == last_instruction_addr {
            // Before the last instruction:
            // - Set eip to *after* the instruction
            // - Set previous_eip to *before* the instruction
            codegen::gen_set_previous_eip_offset_from_eip(
                ctx.builder,
                last_instruction_addr - start_addr,
            );
            codegen::gen_increment_instruction_pointer(ctx.builder, stop_addr - start_addr);
        }

        let wasm_length_before = ctx.builder.instruction_body_length();

        ctx.start_of_current_instruction = ctx.cpu.eip;
        let start_eip = ctx.cpu.eip;
        let mut instruction_flags = 0;
        // decodes one instruction, emits its wasm, and advances ctx.cpu.eip
        jit_instructions::jit_instruction(ctx, &mut instruction_flags);
        let end_eip = ctx.cpu.eip;

        let instruction_length = end_eip - start_eip;
        let was_block_boundary = instruction_flags & JIT_INSTR_BLOCK_BOUNDARY_FLAG != 0;

        // wasm bytes emitted for this single instruction (opstats)
        let wasm_length = ctx.builder.instruction_body_length() - wasm_length_before;
        ::opstats::record_opstat_size_wasm(instruction, wasm_length as u32);

        // the last decoded instruction must be exactly the one at
        // last_instruction_addr, ending exactly at stop_addr
        dbg_assert!((end_eip == stop_addr) == (start_eip == last_instruction_addr));
        dbg_assert!(instruction_length < MAX_INSTRUCTION_LENGTH);

        let end_addr = ctx.cpu.eip;

        if end_addr == stop_addr {
            // no page was crossed
            dbg_assert!(Page::page_of(end_addr) == Page::page_of(start_addr));
            break;
        }

        // Reaching this point with a block-boundary instruction, or past
        // stop_addr, means the analysis pass and the decoder disagree
        if was_block_boundary || is_near_end_of_page(end_addr) || end_addr > stop_addr {
            dbg_log!(
                "Overlapping basic blocks start={:x} expected_end={:x} end={:x} was_block_boundary={} near_end_of_page={}",
                start_addr,
                stop_addr,
                end_addr,
                was_block_boundary,
                is_near_end_of_page(end_addr)
            );
            dbg_assert!(false);
            break;
        }
    }
}
- #[no_mangle]
- pub fn jit_increase_hotness_and_maybe_compile(
- phys_address: u32,
- cs_offset: u32,
- state_flags: CachedStateFlags,
- hotness: u32,
- ) {
- let ctx = get_jit_state();
- let page = Page::page_of(phys_address);
- let address_hash = jit_hot_hash_page(page) as usize;
- ctx.hot_pages[address_hash] += hotness;
- if ctx.hot_pages[address_hash] >= JIT_THRESHOLD {
- ctx.hot_pages[address_hash] = 0;
- jit_analyze_and_generate(ctx, page, cs_offset, state_flags)
- };
- }
- fn free_wasm_table_index(ctx: &mut JitState, wasm_table_index: u16) {
- if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
- dbg_assert!(!ctx.wasm_table_index_free_list.contains(&wasm_table_index));
- }
- ctx.wasm_table_index_free_list.push(wasm_table_index);
- // It is not strictly necessary to clear the function, but it will fail more predictably if we
- // accidentally use the function and may garbage collect unused modules earlier
- jit_clear_func(wasm_table_index);
- }
/// Remove all entries with the given wasm_table_index in page
fn remove_jit_cache_wasm_index(ctx: &mut JitState, page: Page, wasm_table_index: u16) {
    // caller guarantees the page has at least one cache entry
    let mut cache_array_index = jit_cache_array::get_page_index(page).unwrap();

    // whether the removed entries belonged to a module whose compilation is
    // still in flight
    let mut pending = false;

    loop {
        let entry = jit_cache_array::get_mut(cache_array_index);
        // read the link before remove() below unlinks the entry
        let next_cache_array_index = entry.next_index_same_page();

        if entry.wasm_table_index == wasm_table_index {
            // if one entry is pending, all must be pending
            dbg_assert!(!pending || entry.pending);

            pending = entry.pending;

            jit_cache_array::remove(cache_array_index);

            dbg_assert!(entry.next_index_same_page() == None);
            entry.wasm_table_index = 0;
            entry.start_addr = 0;
            entry.pending = false;
        }

        if let Some(i) = next_cache_array_index {
            cache_array_index = i;
        }
        else {
            break;
        }
    }

    if pending {
        // cannot reuse the index yet; codegen_finalize_finished will free it
        // once the in-flight compilation completes
        ctx.wasm_table_index_pending_free.push(wasm_table_index);
    }
    else {
        free_wasm_table_index(ctx, wasm_table_index);
    }

    if !jit_page_has_code(page) {
        cpu::tlb_set_has_code(page, false);
    }

    if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
        // sanity check that the above iteration deleted all entries
        for i in 0..jit_cache_array::SIZE {
            let entry = jit_cache_array::get(i);
            dbg_assert!(entry.wasm_table_index != wasm_table_index);
        }
    }
}
/// Register a write in this page: Delete all present code
pub fn jit_dirty_page(ctx: &mut JitState, page: Page) {
    let mut did_have_code = false;

    if let Some(mut cache_array_index) = jit_cache_array::get_page_index(page) {
        did_have_code = true;

        // wasm table indices whose modules can be freed now, versus those
        // whose compilation is still in flight (pending entries)
        let mut index_to_free = HashSet::new();
        let mut index_to_pending_free = HashSet::new();

        // detach the whole chain from the page before clearing each entry
        jit_cache_array::set_page_index(page, None);
        profiler::stat_increment(stat::INVALIDATE_PAGE);

        loop {
            profiler::stat_increment(stat::INVALIDATE_CACHE_ENTRY);
            let entry = jit_cache_array::get_mut(cache_array_index);
            let wasm_table_index = entry.wasm_table_index;

            dbg_assert!(page == Page::page_of(entry.start_addr));

            // read the link before wiping the entry
            let next_cache_array_index = entry.next_index_same_page();

            entry.set_next_index_same_page(None);
            entry.start_addr = 0;
            entry.wasm_table_index = 0;

            if entry.pending {
                // a given module's entries are either all pending or all
                // finished, never mixed
                dbg_assert!(!index_to_free.contains(&wasm_table_index));

                entry.pending = false;

                index_to_pending_free.insert(wasm_table_index);
            }
            else {
                dbg_assert!(!index_to_pending_free.contains(&wasm_table_index));
                index_to_free.insert(wasm_table_index);
            }

            if let Some(i) = next_cache_array_index {
                cache_array_index = i;
            }
            else {
                break;
            }
        }

        profiler::stat_increment_by(
            stat::INVALIDATE_MODULE,
            index_to_pending_free.len() as u64 + index_to_free.len() as u64,
        );

        for index in index_to_free.iter().cloned() {
            free_wasm_table_index(ctx, index)
        }

        for index in index_to_pending_free {
            // freed later by codegen_finalize_finished
            ctx.wasm_table_index_pending_free.push(index);
        }
    }

    match ctx.entry_points.remove(&page) {
        None => {},
        Some(_entry_points) => {
            did_have_code = true;

            // don't try to compile code in this page anymore until it's hot again
            ctx.hot_pages[jit_hot_hash_page(page) as usize] = 0;
        },
    }

    if did_have_code {
        cpu::tlb_set_has_code(page, false);
    }
}
- #[no_mangle]
- pub fn jit_dirty_cache(start_addr: u32, end_addr: u32) {
- dbg_assert!(start_addr < end_addr);
- let start_page = Page::page_of(start_addr);
- let end_page = Page::page_of(end_addr - 1);
- for page in start_page.to_u32()..end_page.to_u32() + 1 {
- jit_dirty_page(get_jit_state(), Page::page_of(page << 12));
- }
- }
- /// dirty pages in the range of start_addr and end_addr, which must span at most two pages
- pub fn jit_dirty_cache_small(start_addr: u32, end_addr: u32) {
- dbg_assert!(start_addr < end_addr);
- let start_page = Page::page_of(start_addr);
- let end_page = Page::page_of(end_addr - 1);
- jit_dirty_page(get_jit_state(), start_page);
- // Note: This can't happen when paging is enabled, as writes across
- // boundaries are split up on two pages
- if start_page != end_page {
- dbg_assert!(start_page.to_u32() + 1 == end_page.to_u32());
- jit_dirty_page(get_jit_state(), end_page);
- }
- }
- #[no_mangle]
- pub fn jit_clear_cache_js() { jit_clear_cache(get_jit_state()) }
- pub fn jit_clear_cache(ctx: &mut JitState) {
- ctx.entry_points.clear();
- for page_index in 0..0x100000 {
- jit_dirty_page(ctx, Page::page_of(page_index << 12))
- }
- jit_clear_all_funcs();
- }
- pub fn jit_page_has_code(page: Page) -> bool {
- let ctx = get_jit_state();
- // Does the page have compiled code
- jit_cache_array::get_page_index(page) != None ||
- // Or are there any entry points that need to be removed on write to the page
- // (this function is used to mark the has_code bit in the tlb to optimise away calls jit_dirty_page)
- ctx.entry_points.contains_key(&page)
- }
- pub fn jit_page_has_pending_code(_ctx: &JitState, page: Page) -> bool {
- if let Some(mut cache_array_index) = jit_cache_array::get_page_index(page) {
- loop {
- let entry = jit_cache_array::get(cache_array_index);
- dbg_assert!(page == Page::page_of(entry.start_addr));
- if entry.pending {
- return true;
- }
- if let Some(i) = entry.next_index_same_page() {
- cache_array_index = i;
- }
- else {
- break;
- }
- }
- }
- return false;
- }
- #[no_mangle]
- pub fn jit_unused_cache_stat() -> u32 {
- let mut count = 0;
- if cfg!(debug_assertions) {
- for i in 0..jit_cache_array::SIZE {
- if (jit_cache_array::get(i)).start_addr == 0 {
- count += 1
- }
- }
- }
- return count;
- }
#[no_mangle]
pub fn jit_get_entry_length(i: u32) -> u32 {
    // Length of cache entry i in bytes; the `len` field only exists in debug
    // or profiler builds, hence the cfg-gated return (0 otherwise)
    #[allow(unused_variables)]
    let entry = jit_cache_array::get(i);
    #[cfg(debug_assertions)]
    return entry.len;
    #[cfg(not(debug_assertions))]
    0
}
- #[no_mangle]
- pub fn jit_get_entry_address(i: u32) -> u32 {
- if cfg!(debug_assertions) { jit_cache_array::get(i).start_addr } else { 0 }
- }
- #[no_mangle]
- pub fn jit_get_entry_pending(i: u32) -> bool {
- if cfg!(debug_assertions) { jit_cache_array::get(i).pending } else { false }
- }
- #[no_mangle]
- pub fn jit_get_wasm_table_index_free_list_count() -> u32 {
- if cfg!(debug_assertions) {
- get_jit_state().wasm_table_index_free_list.len() as u32
- }
- else {
- 0
- }
- }
- #[no_mangle]
- pub fn jit_get_op_len() -> u32 { get_jit_state().wasm_builder.get_op_len() }
- #[no_mangle]
- pub fn jit_get_op_ptr() -> *const u8 { get_jit_state().wasm_builder.get_op_ptr() }
#[cfg(feature = "profiler")]
pub fn check_missed_entry_points(phys_address: u32, state_flags: CachedStateFlags) {
    // Diagnostic (profiler builds only): when falling back to the interpreter,
    // detect whether a compiled entry covering phys_address exists in the
    // cache but was missed by the entry point lookup, and log the details.
    let page = Page::page_of(phys_address);

    for i in page.to_address()..page.to_address() + 4096 {
        // No need to check [CODE_CACHE_SEARCH_SIZE] entries here as we look at consecutive
        // addresses anyway
        let index = i & jit_cache_array::MASK;
        let entry = jit_cache_array::get(index);

        if !entry.pending
            && entry.state_flags == state_flags
            && phys_address >= entry.start_addr
            && phys_address < entry.start_addr + entry.len
        {
            profiler::stat_increment(stat::RUN_INTERPRETED_MISSED_COMPILED_ENTRY_LOOKUP);

            let last_jump_type = unsafe { cpu::debug_last_jump.name() };
            let last_jump_addr = unsafe { cpu::debug_last_jump.phys_address() }.unwrap_or(0);
            let last_jump_opcode =
                if last_jump_addr != 0 { memory::read32s(last_jump_addr) } else { 0 };

            let opcode = memory::read32s(phys_address);
            dbg_log!(
                "Compiled exists, but no entry point, \
                 start={:x} end={:x} phys_addr={:x} opcode={:02x} {:02x} {:02x} {:02x}. \
                 Last jump at {:x} ({}) opcode={:02x} {:02x} {:02x} {:02x}",
                entry.start_addr,
                entry.start_addr + entry.len,
                phys_address,
                opcode & 0xFF,
                opcode >> 8 & 0xFF,
                opcode >> 16 & 0xFF,
                // bug fix: fourth byte was previously `>> 16` a second time
                opcode >> 24 & 0xFF,
                last_jump_addr,
                last_jump_type,
                last_jump_opcode & 0xFF,
                last_jump_opcode >> 8 & 0xFF,
                last_jump_opcode >> 16 & 0xFF,
                // bug fix: same duplicated-shift issue for the last-jump opcode
                last_jump_opcode >> 24 & 0xFF,
            );
        }
    }
}
|