jit.rs

use std::collections::{HashMap, HashSet};

use analysis::AnalysisType;
use codegen;
use cpu;
use cpu_context::CpuContext;
use jit_instructions;
use page::Page;
use profiler;
use profiler::stat;
use state_flags::CachedStateFlags;
use util::SafeToU16;
use wasmgen::module_init::WasmBuilder;
use wasmgen::{module_init, wasm_util};

pub const WASM_TABLE_SIZE: u32 = 0x10000;
pub const HASH_PRIME: u32 = 6151;

pub const CHECK_JIT_CACHE_ARRAY_INVARIANTS: bool = false;
pub const ENABLE_JIT_NONFAULTING_OPTIMZATION: bool = true;

pub const JIT_MAX_ITERATIONS_PER_FUNCTION: u32 = 10000;
pub const JIT_ALWAYS_USE_LOOP_SAFETY: bool = false;
pub const JIT_THRESHOLD: u32 = 2500;

const CONDITION_FUNCTIONS: [&str; 16] = [
    "test_o", "test_no", "test_b", "test_nb", "test_z", "test_nz", "test_be", "test_nbe",
    "test_s", "test_ns", "test_p", "test_np", "test_l", "test_nl", "test_le", "test_nle",
];
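// Indices into CONDITION_FUNCTIONS are x86 condition codes: "test_o" is 0x0
// (overflow) through "test_nle" at 0xF, matching the low nibble of the
// 0x70..0x7F short conditional jump opcodes.
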
const CODE_CACHE_SEARCH_SIZE: u32 = 8;
const MAX_INSTRUCTION_LENGTH: u32 = 16;

mod jit_cache_array {
    use page::Page;
    use state_flags::CachedStateFlags;

    // Note: For performance reasons, this is global state. See jit_find_cache_entry

    const NO_NEXT_ENTRY: u32 = 0xffff_ffff;

    pub const SIZE: u32 = 0x40000;
    pub const MASK: u32 = SIZE - 1;

    #[derive(Copy, Clone)]
    pub struct Entry {
        pub start_addr: u32,

        #[cfg(debug_assertions)]
        pub len: u32,

        #[cfg(debug_assertions)]
        pub opcode: u32,

        // an index into jit_cache_array for the next code_cache entry within the same physical page
        next_index_same_page: u32,

        pub initial_state: u16,
        pub wasm_table_index: u16,
        pub state_flags: CachedStateFlags,
        pub pending: bool,
    }

    impl Entry {
        pub fn create(
            start_addr: u32,
            next_index_same_page: Option<u32>,
            wasm_table_index: u16,
            initial_state: u16,
            state_flags: CachedStateFlags,
            pending: bool,
        ) -> Entry {
            let next_index_same_page = next_index_same_page.unwrap_or(NO_NEXT_ENTRY);

            Entry {
                start_addr,
                next_index_same_page,
                wasm_table_index,
                initial_state,
                state_flags,
                pending,
                #[cfg(debug_assertions)]
                len: 0,
                #[cfg(debug_assertions)]
                opcode: 0,
            }
        }

        pub fn next_index_same_page(&self) -> Option<u32> {
            if self.next_index_same_page == NO_NEXT_ENTRY {
                None
            }
            else {
                Some(self.next_index_same_page)
            }
        }

        pub fn set_next_index_same_page(&mut self, next_index: Option<u32>) {
            if let Some(i) = next_index {
                self.next_index_same_page = i
            }
            else {
                self.next_index_same_page = NO_NEXT_ENTRY
            }
        }
    }

    const DEFAULT_ENTRY: Entry = Entry {
        start_addr: 0,
        next_index_same_page: NO_NEXT_ENTRY,
        wasm_table_index: 0,
        initial_state: 0,
        state_flags: CachedStateFlags::EMPTY,
        pending: false,
        #[cfg(debug_assertions)]
        len: 0,
        #[cfg(debug_assertions)]
        opcode: 0,
    };

    #[allow(non_upper_case_globals)]
    static mut jit_cache_array: [Entry; SIZE as usize] = [Entry {
        start_addr: 0,
        next_index_same_page: 0,
        wasm_table_index: 0,
        initial_state: 0,
        state_flags: CachedStateFlags::EMPTY,
        pending: false,
        #[cfg(debug_assertions)]
        len: 0,
        #[cfg(debug_assertions)]
        opcode: 0,
    }; SIZE as usize];

    #[allow(non_upper_case_globals)]
    static mut page_first_entry: [u32; 0x100000] = [0; 0x100000];

    pub fn get_page_index(page: Page) -> Option<u32> {
        let index = unsafe { page_first_entry[page.to_u32() as usize] };
        if index == NO_NEXT_ENTRY {
            None
        }
        else {
            Some(index)
        }
    }

    pub fn set_page_index(page: Page, index: Option<u32>) {
        let index = index.unwrap_or(NO_NEXT_ENTRY);
        unsafe { page_first_entry[page.to_u32() as usize] = index }
    }

    pub fn get(i: u32) -> &'static Entry { unsafe { &jit_cache_array[i as usize] } }

    pub fn get_mut(i: u32) -> &'static mut Entry { unsafe { &mut jit_cache_array[i as usize] } }

    pub fn get_unchecked(i: u32) -> &'static Entry {
        unsafe { jit_cache_array.get_unchecked(i as usize) }
    }

    fn set(i: u32, entry: Entry) { unsafe { jit_cache_array[i as usize] = entry }; }

    pub fn insert(index: u32, mut entry: Entry) {
        let page = Page::page_of(entry.start_addr);
        let previous_entry_index = get_page_index(page);

        if let Some(previous_entry_index) = previous_entry_index {
            let previous_entry = get(previous_entry_index);

            if previous_entry.start_addr != 0 {
                dbg_assert!(
                    Page::page_of(previous_entry.start_addr) == Page::page_of(entry.start_addr)
                );
            }
        }

        set_page_index(page, Some(index));
        entry.set_next_index_same_page(previous_entry_index);

        set(index, entry);
    }

    pub fn remove(index: u32) {
        let page = Page::page_of(get(index).start_addr);

        let mut page_index = get_page_index(page);
        let mut did_remove = false;

        if page_index == Some(index) {
            set_page_index(page, get(index).next_index_same_page());
            did_remove = true;
        }
        else {
            while let Some(page_index_ok) = page_index {
                let next_index = get(page_index_ok).next_index_same_page();

                if next_index == Some(index) {
                    get_mut(page_index_ok)
                        .set_next_index_same_page(get(index).next_index_same_page());
                    did_remove = true;
                    break;
                }

                page_index = next_index;
            }
        }

        get_mut(index).set_next_index_same_page(None);

        dbg_assert!(did_remove);
    }
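
    // insert() and remove() above maintain, per physical page, an intrusive
    // singly-linked list threaded through the flat entry array:
    // page_first_entry holds the head index and next_index_same_page the
    // links. A minimal sketch of walking one chain (the helper name
    // for_each_entry_in_page is hypothetical, not part of this module):
    //
    //     fn for_each_entry_in_page(page: Page, mut f: impl FnMut(&Entry)) {
    //         let mut index = get_page_index(page);
    //         while let Some(i) = index {
    //             let entry = get(i);
    //             f(entry);
    //             index = entry.next_index_same_page();
    //         }
    //     }
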
    pub fn iter() -> ::std::slice::Iter<'static, Entry> { unsafe { jit_cache_array.iter() } }

    pub fn clear() {
        unsafe {
            for (i, _) in jit_cache_array.iter().enumerate() {
                jit_cache_array[i] = DEFAULT_ENTRY;
            }

            for (i, _) in page_first_entry.iter().enumerate() {
                page_first_entry[i] = NO_NEXT_ENTRY;
            }
        }
    }

    pub fn check_invariants() {
        if !::jit::CHECK_JIT_CACHE_ARRAY_INVARIANTS {
            return;
        }

        // there are no loops in the linked lists
        // https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_Tortoise_and_Hare
        for i in 0..(1 << 20) {
            let mut slow = get_page_index(Page::page_of(i << 12));
            let mut fast = slow;

            while let Some(fast_ok) = fast {
                fast = get(fast_ok).next_index_same_page();
                slow = get(slow.unwrap()).next_index_same_page();

                if let Some(fast_ok) = fast {
                    fast = get(fast_ok).next_index_same_page();
                }
                else {
                    break;
                }

                dbg_assert!(slow != fast);
            }
        }

        let mut wasm_table_index_to_jit_cache_index = [0; ::jit::WASM_TABLE_SIZE as usize];

        for (i, entry) in iter().enumerate() {
            dbg_assert!(entry.next_index_same_page().map_or(true, |i| i < SIZE));

            if entry.pending {
                dbg_assert!(entry.start_addr != 0);
                dbg_assert!(entry.wasm_table_index != 0);
            }
            else {
                // an invalid entry has both its start_addr and wasm_table_index set to 0;
                // neither start_addr nor wasm_table_index is 0 for any valid entry
                dbg_assert!((entry.start_addr == 0) == (entry.wasm_table_index == 0));
            }

            // having a next entry implies validity
            dbg_assert!(entry.next_index_same_page() == None || entry.start_addr != 0);

            // any valid wasm_table_index can only be used within a single page
            if entry.wasm_table_index != 0 {
                let j = wasm_table_index_to_jit_cache_index[entry.wasm_table_index as usize];

                if j != 0 {
                    let other_entry = get(j);
                    dbg_assert!(other_entry.wasm_table_index == entry.wasm_table_index);
                    dbg_assert!(
                        Page::page_of(other_entry.start_addr) == Page::page_of(entry.start_addr)
                    );
                }
                else {
                    wasm_table_index_to_jit_cache_index[entry.wasm_table_index as usize] =
                        i as u32;
                }
            }

            if entry.start_addr != 0 {
                // valid entries can be reached from page_first_entry
                let mut reached = false;

                let page = Page::page_of(entry.start_addr);
                let mut cache_array_index = get_page_index(page);

                while let Some(index) = cache_array_index {
                    let other_entry = get(index);

                    if i as u32 == index {
                        reached = true;
                        break;
                    }

                    cache_array_index = other_entry.next_index_same_page();
                }

                dbg_assert!(reached);
            }
        }
    }
}

pub struct JitState {
    // as an alternative to HashSet, we could use a bitmap of 4096 bits here
    // (faster, but uses much more memory)
    // or a compressed bitmap (likely faster)
    hot_code_addresses: [u32; HASH_PRIME as usize],
    wasm_table_index_free_list: Vec<u16>,
    wasm_table_index_pending_free: Vec<u16>,
    entry_points: HashMap<Page, HashSet<u16>>,
    wasm_builder: WasmBuilder,
}

impl JitState {
    pub fn create_and_initialise() -> JitState {
        let mut wasm_builder = WasmBuilder::new();
        wasm_builder.init();

        let mut c = JitState {
            hot_code_addresses: [0; HASH_PRIME as usize],
            wasm_table_index_free_list: vec![],
            wasm_table_index_pending_free: vec![],
            entry_points: HashMap::new(),
            wasm_builder,
        };
        jit_empty_cache(&mut c);
        c
    }
}

#[derive(PartialEq, Eq)]
enum BasicBlockType {
    Normal {
        next_block_addr: u32,
    },
    ConditionalJump {
        next_block_addr: u32,
        next_block_branch_taken_addr: Option<u32>,
        condition: u8,
        jump_offset: i32,
        jump_offset_is_32: bool,
    },
    Exit,
}

struct BasicBlock {
    addr: u32,
    end_addr: u32,
    is_entry_block: bool,
    ty: BasicBlockType,
}

#[repr(C)]
#[derive(Copy, Clone)]
pub struct cached_code {
    pub wasm_table_index: u16,
    pub initial_state: u16,
}

impl cached_code {
    const NONE: cached_code = cached_code {
        wasm_table_index: 0,
        initial_state: 0,
    };
}

pub struct JitContext<'a> {
    pub cpu: &'a mut CpuContext,
    pub builder: &'a mut WasmBuilder,
}

pub const GEN_LOCAL_ARG_INITIAL_STATE: u32 = 0;
pub const GEN_LOCAL_STATE: u32 = 1;
pub const GEN_LOCAL_ITERATION_COUNTER: u32 = 2;
// local scratch variables for use wherever required
pub const GEN_LOCAL_SCRATCH0: u32 = 3;
pub const GEN_LOCAL_SCRATCH1: u32 = 4;
pub const GEN_LOCAL_SCRATCH2: u32 = 5;
// Function arguments are not included in the local variable count
pub const GEN_NO_OF_LOCALS: u32 = 5;

pub const JIT_INSTR_BLOCK_BOUNDARY_FLAG: u32 = 1 << 0;
pub const JIT_INSTR_NONFAULTING_FLAG: u32 = 1 << 1;

pub const FN_GET_SEG_IDX: u16 = 0;

fn jit_hot_hash_page(page: Page) -> u32 { page.to_u32() % HASH_PRIME }

fn is_near_end_of_page(address: u32) -> bool {
    (address & 0xFFF) >= 0x1000 - MAX_INSTRUCTION_LENGTH
}
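
// Illustrative sanity check for the cutoff above: with 4K pages and
// MAX_INSTRUCTION_LENGTH == 16, exactly the last 16 bytes of every page count
// as "near the end" (a minimal sketch, compiled only for tests).
#[cfg(test)]
mod is_near_end_of_page_tests {
    use super::is_near_end_of_page;

    #[test]
    fn last_16_bytes_of_a_page_are_near_the_end() {
        assert!(!is_near_end_of_page(0x1000)); // first byte of a page
        assert!(!is_near_end_of_page(0x1fef)); // offset 0xfef < 0xff0: still safe
        assert!(is_near_end_of_page(0x1ff0)); // offset 0xff0 == 0x1000 - 16
        assert!(is_near_end_of_page(0x1fff)); // last byte of a page
    }
}
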
pub fn jit_find_cache_entry(phys_address: u32, state_flags: CachedStateFlags) -> cached_code {
    for i in 0..CODE_CACHE_SEARCH_SIZE {
        let index = (phys_address + i) & jit_cache_array::MASK;
        let entry = jit_cache_array::get_unchecked(index);

        #[cfg(debug_assertions)]
        {
            if entry.start_addr == phys_address {
                if entry.pending {
                    profiler::stat_increment(stat::S_RUN_INTERPRETED_PENDING)
                }
                if entry.state_flags != state_flags {
                    profiler::stat_increment(stat::S_RUN_INTERPRETED_DIFFERENT_STATE)
                }
            }

            if is_near_end_of_page(phys_address) {
                assert!(entry.start_addr != phys_address);
                profiler::stat_increment(stat::S_RUN_INTERPRETED_NEAR_END_OF_PAGE);
            }
        }

        if !entry.pending && entry.start_addr == phys_address && entry.state_flags == state_flags {
            #[cfg(debug_assertions)]
            {
                assert!(cpu::read32(entry.start_addr) == entry.opcode)
            }

            return cached_code {
                wasm_table_index: entry.wasm_table_index,
                initial_state: entry.initial_state,
            };
        }
    }

    cached_code::NONE
}
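
// The lookup above is a bounded linear probe: an entry for phys_address can
// only live in one of the CODE_CACHE_SEARCH_SIZE consecutive slots starting
// at phys_address & MASK (see create_cache_entry), so both hit and miss cost
// at most 8 slot comparisons and no pointer chasing. Illustrative example:
// phys_address 0x12345 probes indices 0x12345..=0x1234c and returns a hit
// only if start_addr, state_flags and !pending all agree.
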
fn record_entry_point(ctx: &mut JitState, phys_address: u32) {
    if is_near_end_of_page(phys_address) {
        return;
    }

    let page = Page::page_of(phys_address);
    let offset_in_page = phys_address as u16 & 0xFFF;

    let mut is_new = false;
    ctx.entry_points
        .entry(page)
        .or_insert_with(|| {
            is_new = true;
            HashSet::new()
        })
        .insert(offset_in_page);

    if is_new {
        cpu::tlb_set_has_code(page, true);
    }
}

fn jit_find_basic_blocks(
    page: Page,
    entry_points: &HashSet<u16>,
    cpu: CpuContext,
) -> (Vec<BasicBlock>, bool) {
    let mut to_visit_stack: Vec<u16> = entry_points.iter().cloned().collect();
    let mut marked_as_entry: HashSet<u16> = entry_points.clone();
    let page_high_bits = page.to_address();
    let mut basic_blocks: HashMap<u32, BasicBlock> = HashMap::new();
    let mut requires_loop_limit = false;

    while let Some(to_visit_offset) = to_visit_stack.pop() {
        let to_visit = to_visit_offset as u32 | page_high_bits;

        if basic_blocks.contains_key(&to_visit) {
            continue;
        }

        let mut current_address = to_visit;
        let mut current_block = BasicBlock {
            addr: current_address,
            end_addr: 0,
            ty: BasicBlockType::Exit,
            is_entry_block: false,
        };

        loop {
            if is_near_end_of_page(current_address) {
                // TODO: Don't insert this block if empty
                current_block.end_addr = current_address;
                profiler::stat_increment(stat::S_COMPILE_CUT_OFF_AT_END_OF_PAGE);
                break;
            }

            let mut ctx = &mut CpuContext {
                eip: current_address,
                ..cpu
            };
            let analysis = ::analysis::analyze_step(&mut ctx);
            let has_next_instruction = !analysis.no_next_instruction;
            current_address = ctx.eip;

            match analysis.ty {
                AnalysisType::Normal => {
                    dbg_assert!(has_next_instruction);

                    if basic_blocks.contains_key(&current_address) {
                        current_block.end_addr = current_address;
                        current_block.ty = BasicBlockType::Normal {
                            next_block_addr: current_address,
                        };
                    }
                },
                AnalysisType::Jump {
                    offset,
                    is_32,
                    condition,
                } => {
                    let jump_target = if is_32 {
                        current_address.wrapping_add(offset as u32)
                    }
                    else {
                        ctx.cs_offset.wrapping_add(
                            (current_address
                                .wrapping_sub(ctx.cs_offset)
                                .wrapping_add(offset as u32))
                                & 0xFFFF,
                        )
                    };

                    if let Some(condition) = condition {
                        // conditional jump: continue at next and continue at jump target
                        dbg_assert!(has_next_instruction);
                        to_visit_stack.push(current_address as u16 & 0xFFF);

                        let next_block_branch_taken_addr;

                        if Page::page_of(jump_target) == page {
                            to_visit_stack.push(jump_target as u16 & 0xFFF);
                            next_block_branch_taken_addr = Some(jump_target);

                            // Very simple heuristic for "infinite loops": This
                            // detects Linux's "calibrating delay loop"
                            if jump_target == current_block.addr {
                                dbg_log!("Basic block looping back to front");
                                requires_loop_limit = true;
                            }
                        }
                        else {
                            next_block_branch_taken_addr = None;
                        }

                        current_block.ty = BasicBlockType::ConditionalJump {
                            next_block_addr: current_address,
                            next_block_branch_taken_addr,
                            condition,
                            jump_offset: offset,
                            jump_offset_is_32: is_32,
                        };
                        current_block.end_addr = current_address;
                        break;
                    }
                    else {
                        // non-conditional jump: continue at jump target
                        if has_next_instruction {
                            // Execution will eventually come back to the next instruction (CALL)
                            marked_as_entry.insert(current_address as u16 & 0xFFF);
                            to_visit_stack.push(current_address as u16 & 0xFFF);
                        }

                        if Page::page_of(jump_target) == page {
                            current_block.ty = BasicBlockType::Normal {
                                next_block_addr: jump_target,
                            };
                            to_visit_stack.push(jump_target as u16 & 0xFFF);
                        }
                        else {
                            current_block.ty = BasicBlockType::Exit;
                        }

                        current_block.end_addr = current_address;
                        break;
                    }
                },
                AnalysisType::BlockBoundary => {
                    // a block boundary but not a jump, get out
                    if has_next_instruction {
                        // block boundary, but execution will eventually come back
                        // to the next instruction. Create a new basic block
                        // starting at the next instruction and register it as an
                        // entry point
                        marked_as_entry.insert(current_address as u16 & 0xFFF);
                        to_visit_stack.push(current_address as u16 & 0xFFF);
                    }

                    current_block.end_addr = current_address;
                    break;
                },
            }
        }

        basic_blocks.insert(to_visit, current_block);
    }

    for block in basic_blocks.values_mut() {
        if marked_as_entry.contains(&(block.addr as u16 & 0xFFF)) {
            block.is_entry_block = true;
        }
    }

    let mut basic_blocks: Vec<BasicBlock> =
        basic_blocks.into_iter().map(|(_, block)| block).collect();
    basic_blocks.sort_by_key(|block| block.addr);

    for i in 0..basic_blocks.len() - 1 {
        let next_block_addr = basic_blocks[i + 1].addr;
        let block = &mut basic_blocks[i];

        if next_block_addr < block.end_addr {
            block.ty = BasicBlockType::Normal { next_block_addr };
            block.end_addr = next_block_addr;
            // TODO: assert that the old type is equal to the type of the following block?
        }
    }

    (basic_blocks, requires_loop_limit)
}
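
// Note on the final pass above: after sorting by address, a block that
// overlaps its successor is truncated to end where the successor starts and
// rewired to fall through to it. With illustrative addresses, blocks
// [0x10..0x30) and [0x18..0x30) become [0x10..0x18) -> Normal { 0x18 } and
// [0x18..0x30), so no byte of the page is compiled into two places.
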
fn create_cache_entry(ctx: &mut JitState, entry: jit_cache_array::Entry) {
    let mut found_entry_index = None;
    let phys_addr = entry.start_addr;

    for i in 0..CODE_CACHE_SEARCH_SIZE {
        let addr_index = (phys_addr + i) & jit_cache_array::MASK;
        let entry = jit_cache_array::get(addr_index);

        if entry.start_addr == 0 {
            found_entry_index = Some(addr_index);
            break;
        }
    }

    let found_entry_index = match found_entry_index {
        Some(i) => i,
        None => {
            profiler::stat_increment(stat::S_CACHE_MISMATCH);

            // no free slots, overwrite the first one
            let found_entry_index = phys_addr & jit_cache_array::MASK;

            let old_entry = jit_cache_array::get_mut(found_entry_index);

            // if we're here, we expect to overwrite a valid index
            dbg_assert!(old_entry.start_addr != 0);
            dbg_assert!(old_entry.wasm_table_index != 0);

            if old_entry.wasm_table_index == entry.wasm_table_index {
                dbg_assert!(old_entry.pending);
                dbg_assert!(Page::page_of(old_entry.start_addr) == Page::page_of(phys_addr));

                // The old entry belongs to the same wasm table index as this entry.
                // *Don't* free the wasm table index, instead just delete the old entry
                // and use its slot for this entry.
                // TODO: Optimally, we should pick another slot instead of dropping
                // an entry that has just been created.
                //let old_page = Page::page_of(old_entry.start_addr);

                jit_cache_array::remove(found_entry_index);

                dbg_assert!(old_entry.next_index_same_page() == None);
                old_entry.pending = false;
                old_entry.start_addr = 0;
            }
            else {
                let old_wasm_table_index = old_entry.wasm_table_index;
                let old_page = Page::page_of(old_entry.start_addr);

                remove_jit_cache_wasm_index(ctx, old_page, old_wasm_table_index);

                //jit_cache_array::check_invariants();

                // old entry should be removed after calling remove_jit_cache_wasm_index
                dbg_assert!(!old_entry.pending);
                dbg_assert!(old_entry.start_addr == 0);
                dbg_assert!(old_entry.wasm_table_index == 0);
                dbg_assert!(old_entry.next_index_same_page() == None);
            }

            found_entry_index
        },
    };

    jit_cache_array::insert(found_entry_index, entry);
}
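
// Eviction sketch: create_cache_entry probes the same 8-slot window as
// jit_find_cache_entry. If every candidate slot is occupied, the entry at
// phys_addr & MASK is sacrificed; when that entry belongs to a different
// compiled module, the whole module is torn down via
// remove_jit_cache_wasm_index so its wasm table index can eventually be
// reused.
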
#[cfg(debug_assertions)]
pub fn jit_force_generate_unsafe(
    ctx: &mut JitState,
    phys_addr: u32,
    cs_offset: u32,
    state_flags: CachedStateFlags,
) {
    record_entry_point(ctx, phys_addr);
    jit_analyze_and_generate(ctx, Page::page_of(phys_addr), cs_offset, state_flags);
}

fn jit_analyze_and_generate(
    ctx: &mut JitState,
    page: Page,
    cs_offset: u32,
    state_flags: CachedStateFlags,
) {
    profiler::stat_increment(stat::S_COMPILE);
    let entry_points = ctx.entry_points.remove(&page);

    let cpu = CpuContext {
        eip: 0,
        prefixes: 0,
        cs_offset,
        state_flags,
    };

    if let Some(entry_points) = entry_points {
        let (mut basic_blocks, requires_loop_limit) =
            jit_find_basic_blocks(page, &entry_points, cpu.clone());

        //for b in basic_blocks.iter() {
        //    dbg_log!(
        //        "> Basic block from {:x} to {:x}, is_entry={}",
        //        b.addr,
        //        b.end_addr,
        //        b.is_entry_block
        //    );
        //}

        jit_generate_module(
            &basic_blocks,
            requires_loop_limit,
            cpu.clone(),
            &mut ctx.wasm_builder,
        );

        // allocate an index in the wasm table
        let wasm_table_index = ctx
            .wasm_table_index_free_list
            .pop()
            .expect("allocate wasm table index");
        dbg_assert!(wasm_table_index != 0);

        // create entries for each basic block that is marked as an entry point
        let mut entry_point_count = 0;

        for (i, block) in basic_blocks.iter().enumerate() {
            profiler::stat_increment(stat::S_COMPILE_BASIC_BLOCK);

            if block.is_entry_block && block.addr != block.end_addr {
                dbg_assert!(block.addr != 0);

                let initial_state = i.safe_to_u16();

                let mut entry = jit_cache_array::Entry::create(
                    block.addr,
                    None, // to be filled in by create_cache_entry
                    wasm_table_index,
                    initial_state,
                    state_flags,
                    true,
                );

                #[cfg(debug_assertions)]
                {
                    entry.len = block.end_addr - block.addr;
                    entry.opcode = cpu::read32(block.addr);
                }

                create_cache_entry(ctx, entry);

                entry_point_count += 1;
                profiler::stat_increment(stat::S_COMPILE_ENTRY_POINT);
            }
        }

        dbg_assert!(entry_point_count > 0);

        cpu::tlb_set_has_code(page, true);

        jit_cache_array::check_invariants();
        cpu::check_tlb_invariants();

        let end_addr = 0;
        let first_opcode = 0;
        let phys_addr = page.to_address();

        // will call codegen_finalize_finished asynchronously when finished
        cpu::codegen_finalize(
            wasm_table_index,
            phys_addr,
            end_addr,
            first_opcode,
            state_flags,
        );

        profiler::stat_increment(stat::S_COMPILE_SUCCESS);
    }
    else {
        //dbg_log("No basic blocks, not generating code");
        // Nothing to do
    }
}

pub fn codegen_finalize_finished(
    ctx: &mut JitState,
    wasm_table_index: u16,
    phys_addr: u32,
    _end_addr: u32,
    _first_opcode: u32,
    _state_flags: CachedStateFlags,
) {
    assert!(wasm_table_index != 0);

    match ctx
        .wasm_table_index_pending_free
        .iter()
        .position(|i| *i == wasm_table_index)
    {
        Some(i) => {
            ctx.wasm_table_index_pending_free.swap_remove(i);
            free_wasm_table_index(ctx, wasm_table_index);
        },
        None => {
            let page = Page::page_of(phys_addr);
            let mut cache_array_index = jit_cache_array::get_page_index(page);

            while let Some(index) = cache_array_index {
                let mut entry = jit_cache_array::get_mut(index);

                if entry.wasm_table_index == wasm_table_index {
                    dbg_assert!(entry.pending);
                    entry.pending = false;
                }

                cache_array_index = entry.next_index_same_page();
            }
        },
    }

    jit_cache_array::check_invariants();

    if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
        // sanity check that the above iteration marked all entries as not pending
        for entry in jit_cache_array::iter() {
            if entry.wasm_table_index == wasm_table_index {
                assert!(!entry.pending);
            }
        }
    }
}
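
// Lifecycle of a compiled unit, as implied by the code above (timeline
// illustrative):
//
//   jit_increase_hotness_and_maybe_compile  -> cache entries created, pending = true
//   cpu::codegen_finalize                   -> module instantiated asynchronously
//   codegen_finalize_finished               -> pending = false, entries become usable
//
// If the page is dirtied while compilation is in flight, the table index is
// parked in wasm_table_index_pending_free and only reclaimed here, so a stale
// module can never be re-entered through the cache.
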
fn jit_generate_module(
    basic_blocks: &Vec<BasicBlock>,
    requires_loop_limit: bool,
    mut cpu: CpuContext,
    builder: &mut WasmBuilder,
) {
    builder.reset();

    let fn_get_seg_idx = builder.get_fn_idx("get_seg", module_init::FN1_RET_TYPE_INDEX);
    dbg_assert!(fn_get_seg_idx == FN_GET_SEG_IDX);

    let basic_block_indices: HashMap<u32, u32> = basic_blocks
        .iter()
        .enumerate()
        .map(|(index, block)| (block.addr, index as u32))
        .collect();

    // set state local variable to the initial state passed as the first argument
    wasm_util::get_local(&mut builder.instruction_body, GEN_LOCAL_ARG_INITIAL_STATE);
    wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_STATE);

    // initialise max_iterations
    // TODO: Remove if not requires_loop_limit
    wasm_util::push_i32(
        &mut builder.instruction_body,
        JIT_MAX_ITERATIONS_PER_FUNCTION as i32,
    );
    wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_ITERATION_COUNTER);

    // main state machine loop
    wasm_util::loop_void(&mut builder.instruction_body);

    if JIT_ALWAYS_USE_LOOP_SAFETY || requires_loop_limit {
        profiler::stat_increment(stat::S_COMPILE_WITH_LOOP_SAFETY);

        // decrement max_iterations
        wasm_util::get_local(&mut builder.instruction_body, GEN_LOCAL_ITERATION_COUNTER);
        wasm_util::push_i32(&mut builder.instruction_body, -1);
        wasm_util::add_i32(&mut builder.instruction_body);
        wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_ITERATION_COUNTER);

        // if max_iterations == 0: return
        wasm_util::get_local(&mut builder.instruction_body, GEN_LOCAL_ITERATION_COUNTER);
        wasm_util::eqz_i32(&mut builder.instruction_body);
        wasm_util::if_void(&mut builder.instruction_body);
        wasm_util::return_(&mut builder.instruction_body);
        wasm_util::block_end(&mut builder.instruction_body);
    }

    wasm_util::block_void(&mut builder.instruction_body); // for the default case

    // generate the opening blocks for the cases
    for _ in 0..basic_blocks.len() {
        wasm_util::block_void(&mut builder.instruction_body);
    }

    wasm_util::get_local(&mut builder.instruction_body, GEN_LOCAL_STATE);
    wasm_util::brtable_and_cases(&mut builder.instruction_body, basic_blocks.len() as u32);

    for (i, block) in basic_blocks.iter().enumerate() {
        // Case [i] will jump after the [i]th block, so we first generate the
        // block end opcode and then the code for that block
        wasm_util::block_end(&mut builder.instruction_body);

        if block.addr == block.end_addr {
            // Empty basic block, generate no code (for example, jump to block
            // that is near end of page)
            dbg_assert!(block.ty == BasicBlockType::Exit);
        }
        else {
            builder.commit_instruction_body_to_cs();
            jit_generate_basic_block(&mut cpu, builder, block.addr, block.end_addr);
            builder.commit_instruction_body_to_cs();
        }

        let invalid_connection_to_next_block = block.end_addr != cpu.eip;

        match (&block.ty, invalid_connection_to_next_block) {
            (_, true) | (&BasicBlockType::Exit, _) => {
                // Exit this function
                wasm_util::return_(&mut builder.instruction_body);
            },
            (&BasicBlockType::Normal { next_block_addr }, _) => {
                // Unconditional jump to next basic block
                // - All instructions that don't change eip
                // - Unconditional jump

                let next_bb_index = *basic_block_indices.get(&next_block_addr).expect("");
                //dbg_assert!(next_bb_index != -1);

                // set state variable to next basic block
                wasm_util::push_i32(&mut builder.instruction_body, next_bb_index as i32);
                wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_STATE);

                wasm_util::br(
                    &mut builder.instruction_body,
                    basic_blocks.len() as u32 - i as u32,
                ); // to the loop
            },
            (
                &BasicBlockType::ConditionalJump {
                    next_block_addr,
                    next_block_branch_taken_addr,
                    condition,
                    jump_offset,
                    jump_offset_is_32,
                },
                _,
            ) => {
                // Conditional jump to next basic block
                // - jnz, jc, etc.
                dbg_assert!(condition < 16);
                let condition = CONDITION_FUNCTIONS[condition as usize];

                codegen::gen_fn0_const_ret(builder, condition);

                wasm_util::if_void(&mut builder.instruction_body);

                // Branch taken
                if jump_offset_is_32 {
                    codegen::gen_relative_jump(builder, jump_offset);
                }
                else {
                    // TODO: Is this necessary?
                    let ctx = &mut JitContext {
                        cpu: &mut cpu.clone(),
                        builder,
                    };
                    codegen::gen_fn1_const(ctx, "jmp_rel16", jump_offset as u32);
                }

                if let Some(next_block_branch_taken_addr) = next_block_branch_taken_addr {
                    let next_basic_block_branch_taken_index = *basic_block_indices
                        .get(&next_block_branch_taken_addr)
                        .expect("");

                    wasm_util::push_i32(
                        &mut builder.instruction_body,
                        next_basic_block_branch_taken_index as i32,
                    );
                    wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_STATE);
                }
                else {
                    // Jump to different page
                    wasm_util::return_(&mut builder.instruction_body);
                }

                wasm_util::else_(&mut builder.instruction_body);

                {
                    // Branch not taken
                    // TODO: Could use fall-through here
                    let next_basic_block_index =
                        *basic_block_indices.get(&next_block_addr).expect("");

                    wasm_util::push_i32(
                        &mut builder.instruction_body,
                        next_basic_block_index as i32,
                    );
                    wasm_util::set_local(&mut builder.instruction_body, GEN_LOCAL_STATE);
                }

                wasm_util::block_end(&mut builder.instruction_body);

                wasm_util::br(
                    &mut builder.instruction_body,
                    basic_blocks.len() as u32 - i as u32,
                ); // to the loop
            },
        }
    }

    wasm_util::block_end(&mut builder.instruction_body); // default case
    wasm_util::unreachable(&mut builder.instruction_body);

    wasm_util::block_end(&mut builder.instruction_body); // loop

    builder.commit_instruction_body_to_cs();
    builder.finish(GEN_NO_OF_LOCALS as u8);
}
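
// Rough shape of the module emitted above, as pseudo-WAT for two basic blocks
// (local 0 is the initial-state argument, local 1 the state, local 2 the
// iteration counter; names and exact opcode sequences are a sketch, not the
// precise encoder output):
//
//     (func (param $initial_state i32) (local $state i32) (local $counter i32) ...
//       local.get $initial_state
//       local.set $state
//       i32.const 10000
//       local.set $counter
//       (loop $main
//         ;; optional loop-safety decrement and zero-check of $counter
//         (block $default
//           (block $case1
//             (block $case0
//               local.get $state
//               br_table $case0 $case1 $default)
//             ;; code for basic block 0, then: set $state, br $main)
//           ;; code for basic block 1, then: return, or set $state and br $main)
//         unreachable))
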
fn jit_generate_basic_block(
    mut cpu: &mut CpuContext,
    builder: &mut WasmBuilder,
    start_addr: u32,
    stop_addr: u32,
) {
    let mut len = 0;

    let mut end_addr;
    let mut was_block_boundary;
    let mut eip_delta = 0;

    //*instruction_pointer = start_addr;

    // First iteration of do-while assumes the caller confirms this condition
    dbg_assert!(!is_near_end_of_page(start_addr));

    cpu.eip = start_addr;

    loop {
        if false {
            ::opstats::gen_opstats(builder, cpu::read32(cpu.eip));
        }

        let start_eip = cpu.eip;
        let mut instruction_flags = 0;
        jit_instructions::jit_instruction(&mut cpu, builder, &mut instruction_flags);
        let end_eip = cpu.eip;

        let instruction_length = end_eip - start_eip;
        was_block_boundary = instruction_flags & JIT_INSTR_BLOCK_BOUNDARY_FLAG != 0;

        dbg_assert!(instruction_length < MAX_INSTRUCTION_LENGTH);

        if ENABLE_JIT_NONFAULTING_OPTIMZATION {
            // There are a few conditions to keep in mind to optimize the update of previous_ip
            // and instruction_pointer:
            // - previous_ip MUST be updated just before a faulting instruction
            // - instruction_pointer MUST be updated before jump instructions (since they use
            //   the EIP value for instruction logic)
            // - Nonfaulting instructions don't need either to be updated
            if was_block_boundary {
                // prev_ip = eip + eip_delta, so that previous_ip points to the start of this
                // instruction
                codegen::gen_set_previous_eip_offset_from_eip(builder, eip_delta);

                // eip += eip_delta + len(jump) so instruction logic uses the correct eip
                codegen::gen_increment_instruction_pointer(
                    builder,
                    eip_delta + instruction_length,
                );
                builder.commit_instruction_body_to_cs();

                eip_delta = 0;
            }
            else if instruction_flags & JIT_INSTR_NONFAULTING_FLAG == 0 {
                // Faulting instruction

                // prev_ip = eip + eip_delta, so that previous_ip points to the start of this
                // instruction
                codegen::gen_set_previous_eip_offset_from_eip(builder, eip_delta);
                builder.commit_instruction_body_to_cs();

                // Leave this instruction's length to be updated in the next batch, whatever it
                // may be
                eip_delta += instruction_length;
            }
            else {
                // Non-faulting, so we skip setting previous_ip and simply queue the instruction
                // length for whenever eip is updated next
                profiler::stat_increment(stat::S_NONFAULTING_OPTIMIZATION);
                eip_delta += instruction_length;
            }
        }
        else {
            codegen::gen_set_previous_eip(builder);
            codegen::gen_increment_instruction_pointer(builder, instruction_length);
            builder.commit_instruction_body_to_cs();
        }

        end_addr = cpu.eip;
        len += 1;

        if end_addr == stop_addr {
            break;
        }

        if was_block_boundary || is_near_end_of_page(end_addr) || end_addr > stop_addr {
            dbg_log!(
                "Overlapping basic blocks start={:x} expected_end={:x} end={:x} was_block_boundary={} near_end_of_page={}",
                start_addr,
                stop_addr,
                end_addr,
                was_block_boundary,
                is_near_end_of_page(end_addr)
            );
            break;
        }
    }

    if ENABLE_JIT_NONFAULTING_OPTIMZATION {
        // When the block ends in a non-jump instruction, we may have uncommitted updates still
        if eip_delta > 0 {
            builder.commit_instruction_body_to_cs();
            codegen::gen_increment_instruction_pointer(builder, eip_delta);
        }
    }

    codegen::gen_increment_timestamp_counter(builder, len);

    // no page was crossed
    dbg_assert!(Page::page_of(end_addr) == Page::page_of(start_addr));
}
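
// Worked example of the eip_delta batching above, for a 2-byte non-faulting
// instruction, then a 3-byte faulting one, then a 2-byte jump (lengths made
// up for illustration):
//
//   insn 1 (non-faulting, len 2): eip_delta = 2, no update emitted
//   insn 2 (faulting, len 3):     emit previous_ip = eip + 2; eip_delta = 5
//   insn 3 (jump, len 2):         emit previous_ip = eip + 5; emit eip += 7
//
// A run of non-faulting instructions thus costs a single instruction_pointer
// update instead of one per instruction.
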
pub fn jit_increase_hotness_and_maybe_compile(
    ctx: &mut JitState,
    phys_address: u32,
    cs_offset: u32,
    state_flags: CachedStateFlags,
) {
    record_entry_point(ctx, phys_address);

    let page = Page::page_of(phys_address);
    let address_hash = jit_hot_hash_page(page) as usize;
    ctx.hot_code_addresses[address_hash] += 1;

    if ctx.hot_code_addresses[address_hash] >= JIT_THRESHOLD {
        ctx.hot_code_addresses[address_hash] = 0;
        jit_analyze_and_generate(ctx, page, cs_offset, state_flags)
    }
}
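
// Hotness is tracked per hash bucket rather than per page, so unrelated pages
// can share a counter, e.g. jit_hot_hash_page(Page::page_of(0x12345000)) ==
// 0x12345 % 6151 == 753. A collision at worst triggers compilation slightly
// early; in exchange the hot-code table stays at a fixed HASH_PRIME entries.
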
fn free_wasm_table_index(ctx: &mut JitState, wasm_table_index: u16) {
    if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
        dbg_assert!(!ctx.wasm_table_index_free_list.contains(&wasm_table_index));
    }
    ctx.wasm_table_index_free_list.push(wasm_table_index)

    // It is not strictly necessary to clear the function, but it will fail
    // more predictably if we accidentally use the function
    // XXX: This fails in Chromium:
    // RangeError: WebAssembly.Table.set(): Modifying existing entry in table not supported.
    //jit_clear_func(wasm_table_index);
}

/// Remove all entries with the given wasm_table_index in page
fn remove_jit_cache_wasm_index(ctx: &mut JitState, page: Page, wasm_table_index: u16) {
    let mut cache_array_index = jit_cache_array::get_page_index(page).unwrap();

    let mut pending = false;

    loop {
        let entry = jit_cache_array::get_mut(cache_array_index);
        let next_cache_array_index = entry.next_index_same_page();

        if entry.wasm_table_index == wasm_table_index {
            // if one entry is pending, all must be pending
            dbg_assert!(!pending || entry.pending);

            pending = entry.pending;

            jit_cache_array::remove(cache_array_index);

            dbg_assert!(entry.next_index_same_page() == None);
            entry.wasm_table_index = 0;
            entry.start_addr = 0;
            entry.pending = false;
        }

        if let Some(i) = next_cache_array_index {
            cache_array_index = i;
        }
        else {
            break;
        }
    }

    if pending {
        ctx.wasm_table_index_pending_free.push(wasm_table_index);
    }
    else {
        free_wasm_table_index(ctx, wasm_table_index);
    }

    if !jit_page_has_code(ctx, page) {
        cpu::tlb_set_has_code(page, false);
    }

    if CHECK_JIT_CACHE_ARRAY_INVARIANTS {
        // sanity check that the above iteration deleted all entries
        for entry in jit_cache_array::iter() {
            dbg_assert!(entry.wasm_table_index != wasm_table_index);
        }
    }
}

/// Register a write in this page: Delete all present code
pub fn jit_dirty_page(ctx: &mut JitState, page: Page) {
    let mut did_have_code = false;

    if let Some(mut cache_array_index) = jit_cache_array::get_page_index(page) {
        did_have_code = true;

        let mut index_to_free = HashSet::new();
        let mut index_to_pending_free = HashSet::new();

        jit_cache_array::set_page_index(page, None);
        profiler::stat_increment(stat::S_INVALIDATE_PAGE);

        loop {
            profiler::stat_increment(stat::S_INVALIDATE_CACHE_ENTRY);
            let entry = jit_cache_array::get_mut(cache_array_index);
            let wasm_table_index = entry.wasm_table_index;

            assert!(page == Page::page_of(entry.start_addr));

            let next_cache_array_index = entry.next_index_same_page();

            entry.set_next_index_same_page(None);
            entry.start_addr = 0;
            entry.wasm_table_index = 0;

            if entry.pending {
                entry.pending = false;
                index_to_pending_free.insert(wasm_table_index);
            }
            else {
                index_to_free.insert(wasm_table_index);
            }

            if let Some(i) = next_cache_array_index {
                cache_array_index = i;
            }
            else {
                break;
            }
        }

        for index in index_to_free.iter().cloned() {
            free_wasm_table_index(ctx, index)
        }

        for index in index_to_pending_free {
            ctx.wasm_table_index_pending_free.push(index);
        }
    }

    match ctx.entry_points.remove(&page) {
        None => {},
        Some(_entry_points) => {
            did_have_code = true;

            // don't try to compile code in this page anymore until it's hot again
            ctx.hot_code_addresses[jit_hot_hash_page(page) as usize] = 0;
        },
    }

    if did_have_code {
        cpu::tlb_set_has_code(page, false);
    }
}

pub fn jit_dirty_cache(ctx: &mut JitState, start_addr: u32, end_addr: u32) {
    assert!(start_addr < end_addr);

    let start_page = Page::page_of(start_addr);
    let end_page = Page::page_of(end_addr - 1);

    for page in start_page.to_u32()..end_page.to_u32() + 1 {
        jit_dirty_page(ctx, Page::page_of(page << 12));
    }
}

pub fn jit_dirty_cache_small(ctx: &mut JitState, start_addr: u32, end_addr: u32) {
    assert!(start_addr < end_addr);

    let start_page = Page::page_of(start_addr);
    let end_page = Page::page_of(end_addr - 1);

    jit_dirty_page(ctx, start_page);

    // Note: This can't happen when paging is enabled, as writes across page
    // boundaries are split into two separate writes
    if start_page != end_page {
        assert!(start_page.to_u32() + 1 == end_page.to_u32());
        jit_dirty_page(ctx, end_page);
    }
}
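
// Example of the range math in the two functions above: a 4-byte write
// covering 0x1ffe..0x2002 has end_addr - 1 == 0x2001, so pages 0x1 and 0x2
// are both dirtied, while a 4-byte write covering 0x2000..0x2004 stays within
// page 0x2. jit_dirty_cache_small additionally asserts that at most two
// consecutive pages are involved.
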
pub fn jit_dirty_cache_single(ctx: &mut JitState, addr: u32) {
    jit_dirty_page(ctx, Page::page_of(addr));
}

pub fn jit_empty_cache(ctx: &mut JitState) {
    jit_cache_array::clear();

    ctx.entry_points.clear();
    ctx.wasm_table_index_pending_free.clear();
    ctx.wasm_table_index_free_list.clear();

    for i in 0..0xFFFF {
        // don't assign 0 (XXX: Check)
        ctx.wasm_table_index_free_list.push(i as u16 + 1);
    }
}
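
// Wasm table index 0 is reserved as the "absent" marker (see
// cached_code::NONE and the wasm_table_index != 0 assertions above), so the
// free list is refilled with 1..=0xFFFF and never contains 0.
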
pub fn jit_page_has_code(ctx: &JitState, page: Page) -> bool {
    jit_cache_array::get_page_index(page) != None || ctx.entry_points.contains_key(&page)
}

#[cfg(debug_assertions)]
pub fn jit_unused_cache_stat() -> u32 {
    jit_cache_array::iter()
        .filter(|e| e.start_addr == 0)
        .count() as u32
}

#[cfg(debug_assertions)]
pub fn jit_get_entry_length(i: u32) -> u32 { jit_cache_array::get(i).len }

#[cfg(debug_assertions)]
pub fn jit_get_entry_address(i: u32) -> u32 { jit_cache_array::get(i).start_addr }

#[cfg(debug_assertions)]
pub fn jit_get_entry_pending(i: u32) -> bool { jit_cache_array::get(i).pending }

#[cfg(debug_assertions)]
pub fn jit_get_wasm_table_index_free_list_count(ctx: &JitState) -> u32 {
    ctx.wasm_table_index_free_list.len() as u32
}

pub fn jit_get_op_len(ctx: &JitState) -> u32 { ctx.wasm_builder.get_op_len() }

pub fn jit_get_op_ptr(ctx: &JitState) -> *const u8 { ctx.wasm_builder.get_op_ptr() }