jit.c

#include <stdint.h>

#include "const.h"
#include "cpu.h"
#include "global_pointers.h"
#include "jit.h"
#include "js_imports.h"
#include "log.h"
#include "profiler/profiler.h"
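
// Return a wasm table index to the free list so it can be reused for a
// subsequently compiled function. Debug builds check that the index is not
// already on the list.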
void free_wasm_table_index(uint16_t wasm_table_index)
{
#if DEBUG
    for(int32_t i = 0; i < wasm_table_index_free_list_count; i++)
    {
        assert(wasm_table_index_free_list[i] != wasm_table_index);
    }
#endif
    assert(wasm_table_index_free_list_count < WASM_TABLE_SIZE);
    wasm_table_index_free_list[wasm_table_index_free_list_count++] = wasm_table_index;

    // It is not strictly necessary to clear the function, but it will fail
    // more predictably if we accidentally use the function
    // XXX: This fails in Chromium:
    // RangeError: WebAssembly.Table.set(): Modifying existing entry in table not supported.
    //jit_clear_func(wasm_table_index);
}

// remove the entry with the given index from the jit_cache_arr structure
void remove_jit_cache_entry(uint32_t page, int32_t addr_index)
{
    assert(addr_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY);
    assert(page == (jit_cache_arr[addr_index].start_addr >> 12));

    int32_t page_index = page_first_jit_cache_entry[page];
    bool did_remove = false;

    if(page_index == addr_index)
    {
        page_first_jit_cache_entry[page] = jit_cache_arr[addr_index].next_index_same_page;
        did_remove = true;
    }
    else
    {
        while(page_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY)
        {
            int32_t next_index = jit_cache_arr[page_index].next_index_same_page;

            if(next_index == addr_index)
            {
                jit_cache_arr[page_index].next_index_same_page = jit_cache_arr[addr_index].next_index_same_page;
                did_remove = true;
                break;
            }

            page_index = next_index;
        }
    }

    jit_cache_arr[addr_index].next_index_same_page = JIT_CACHE_ARRAY_NO_NEXT_ENTRY;

    assert(did_remove);
}

// remove all entries with the given wasm_table_index from the jit_cache_arr structure
void remove_jit_cache_wasm_index(int32_t page, uint16_t wasm_table_index)
{
    int32_t cache_array_index = page_first_jit_cache_entry[page];

    assert(cache_array_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY);

    bool pending = false;

    do
    {
        struct code_cache* entry = &jit_cache_arr[cache_array_index];
        int32_t next_cache_array_index = entry->next_index_same_page;

        if(entry->wasm_table_index == wasm_table_index)
        {
            // if one entry is pending, all must be pending
            dbg_assert(!pending || entry->pending);

            pending = entry->pending;

            remove_jit_cache_entry(page, cache_array_index);
            assert(entry->next_index_same_page == JIT_CACHE_ARRAY_NO_NEXT_ENTRY);
            entry->wasm_table_index = 0;
            entry->start_addr = 0;
            entry->pending = false;
        }

        cache_array_index = next_cache_array_index;
    }
    while(cache_array_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY);

    if(pending)
    {
        assert(wasm_table_index_pending_free_count < WASM_TABLE_SIZE);
        wasm_table_index_pending_free[wasm_table_index_pending_free_count++] = wasm_table_index;
    }
    else
    {
        free_wasm_table_index(wasm_table_index);
    }

    if(page_first_jit_cache_entry[page] == JIT_CACHE_ARRAY_NO_NEXT_ENTRY &&
        page_entry_points[page][0] == ENTRY_POINT_END)
    {
        tlb_set_has_code(page, false);
    }

#if CHECK_JIT_CACHE_ARRAY_INVARIANTS
    // sanity check that the above iteration deleted all entries
    for(int32_t i = 0; i < JIT_CACHE_ARRAY_SIZE; i++)
    {
        struct code_cache* entry = &jit_cache_arr[i];
        assert(entry->wasm_table_index != wasm_table_index);
    }
#endif
}
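
// Linear search for a 16-bit value; used below to deduplicate wasm table
// indices collected while clearing a page.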
bool find_u16(const uint16_t* array, uint16_t value, int32_t length)
{
    for(int32_t i = 0; i < length; i++)
    {
        if(array[i] == value)
        {
            return true;
        }
    }

    return false;
}
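
// Remove all compiled code belonging to the given physical page: unlink every
// cache entry for that page and free (or mark as pending-free) the wasm table
// indices they referenced.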
__attribute__((noinline))
void jit_clear_page(uint32_t index)
{
    assert(index < MAX_PHYSICAL_PAGES);
    int32_t cache_array_index = page_first_jit_cache_entry[index];

    assert(cache_array_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY);

    uint16_t index_to_free[100];
    int32_t index_to_free_length = 0;

    uint16_t index_to_pending_free[100];
    int32_t index_to_pending_free_length = 0;

    page_first_jit_cache_entry[index] = JIT_CACHE_ARRAY_NO_NEXT_ENTRY;
    profiler_stat_increment(S_INVALIDATE_PAGE);

    do
    {
        profiler_stat_increment(S_INVALIDATE_CACHE_ENTRY);
        struct code_cache* entry = &jit_cache_arr[cache_array_index];
        uint16_t wasm_table_index = entry->wasm_table_index;

        assert(same_page(index << DIRTY_ARR_SHIFT, entry->start_addr));

        int32_t next_cache_array_index = entry->next_index_same_page;

        entry->next_index_same_page = JIT_CACHE_ARRAY_NO_NEXT_ENTRY;
        entry->start_addr = 0;
        entry->wasm_table_index = 0;

        if(entry->pending)
        {
            entry->pending = false;

            if(!find_u16(index_to_pending_free, wasm_table_index, index_to_pending_free_length))
            {
                assert(index_to_pending_free_length < 100);
                index_to_pending_free[index_to_pending_free_length++] = wasm_table_index;
            }
        }
        else
        {
            if(!find_u16(index_to_free, wasm_table_index, index_to_free_length))
            {
                assert(index_to_free_length < 100);
                index_to_free[index_to_free_length++] = wasm_table_index;
            }
        }

        cache_array_index = next_cache_array_index;
    }
    while(cache_array_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY);

    for(int32_t i = 0; i < index_to_free_length; i++)
    {
        free_wasm_table_index(index_to_free[i]);
    }

    for(int32_t i = 0; i < index_to_pending_free_length; i++)
    {
        uint16_t wasm_table_index = index_to_pending_free[i];
        assert(wasm_table_index_pending_free_count < WASM_TABLE_SIZE);
        wasm_table_index_pending_free[wasm_table_index_pending_free_count++] = wasm_table_index;
    }
}
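
// Invalidate a dirtied physical page: clear its compiled code, reset its entry
// points and hotness counter, and clear the TLB's has-code flag if the page
// had any code.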
void jit_dirty_index(uint32_t index)
{
    assert(index < MAX_PHYSICAL_PAGES);
    bool did_have_code = false;

    int32_t cache_array_index = page_first_jit_cache_entry[index];

    if(cache_array_index != JIT_CACHE_ARRAY_NO_NEXT_ENTRY)
    {
        did_have_code = true;
        jit_clear_page(index);
    }

    uint16_t* entry_points = page_entry_points[index];

    if(entry_points[0] != ENTRY_POINT_END)
    {
        did_have_code = true;

        // don't try to compile code in this page anymore until it's hot again
        hot_code_addresses[jit_hot_hash_page(index)] = 0;

        for(int32_t i = 0; i < MAX_ENTRIES_PER_PAGE; i++)
        {
            if(entry_points[i] == ENTRY_POINT_END)
            {
                break;
            }

            entry_points[i] = ENTRY_POINT_END;
        }

#if DEBUG
        for(int32_t i = 0; i < MAX_ENTRIES_PER_PAGE; i++)
        {
            assert(entry_points[i] == ENTRY_POINT_END);
        }
#endif
    }

    if(did_have_code)
    {
        tlb_set_has_code(index, false);
    }
}

/*
 * There are 3 primary ways a cached basic block will be dirtied:
 * 1. A write dirties basic block A independently (A is clean and write came
 *    from outside A)
 * 2. A write from within basic block A dirties itself
 * 3. A run_instruction during compilation dirties itself
 * #3 won't happen with generate_instruction so we don't account for it
 */
void jit_dirty_cache(uint32_t start_addr, uint32_t end_addr)
{
#if ENABLE_JIT
    assert(start_addr <= end_addr);

    for(uint32_t i = start_addr; i < end_addr; i++)
    {
        uint32_t index = i >> DIRTY_ARR_SHIFT;
        // XXX: Should only call once per index
        jit_dirty_index(index);
    }
#endif
}

void jit_dirty_cache_small(uint32_t start_addr, uint32_t end_addr)
{
#if ENABLE_JIT
    assert(start_addr <= end_addr);

    uint32_t start_index = start_addr >> DIRTY_ARR_SHIFT;
    uint32_t end_index = (end_addr - 1) >> DIRTY_ARR_SHIFT;

    jit_dirty_index(start_index);

    // Note: This can't happen when paging is enabled, as writes across
    //       boundaries are split up on two pages
    if(start_index != end_index)
    {
        assert(end_index == start_index + 1);
        jit_dirty_index(end_index);
    }
#endif
}
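
// Usage sketch (illustrative, not code from this file): a hypothetical 32-bit
// write handler would invalidate the touched range before performing the
// store. The handler name and signature below are assumptions for the sketch.
//
//     void write32_example(uint32_t phys_addr, int32_t value)
//     {
//         jit_dirty_cache_small(phys_addr, phys_addr + 4);
//         // ... perform the actual store into guest memory ...
//     }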

void jit_dirty_cache_single(uint32_t addr)
{
#if ENABLE_JIT
    uint32_t index = addr >> DIRTY_ARR_SHIFT;

    jit_dirty_index(index);
#endif
}
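
// Reset the entire code cache: clear every cache entry, every per-page entry
// list and entry-point table, and rebuild the wasm table index free list.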
void jit_empty_cache()
{
    for(int32_t i = 0; i < JIT_CACHE_ARRAY_SIZE; i++)
    {
        jit_cache_arr[i].start_addr = 0;
        jit_cache_arr[i].next_index_same_page = JIT_CACHE_ARRAY_NO_NEXT_ENTRY;
        jit_cache_arr[i].wasm_table_index = 0;
        jit_cache_arr[i].pending = false;
    }

    for(int32_t i = 0; i < GROUP_DIRTINESS_LENGTH; i++)
    {
        page_first_jit_cache_entry[i] = JIT_CACHE_ARRAY_NO_NEXT_ENTRY;
    }

    for(int32_t i = 0; i < MAX_PHYSICAL_PAGES; i++)
    {
        uint16_t* entry_points = page_entry_points[i];

        for(int32_t j = 0; j < MAX_ENTRIES_PER_PAGE; j++)
        {
            entry_points[j] = ENTRY_POINT_END;
        }
    }

    for(int32_t i = 0; i < 0xFFFF; i++)
    {
        // don't assign 0 (XXX: Check)
        wasm_table_index_free_list[i] = i + 1;
    }

    wasm_table_index_free_list_count = 0xFFFF;
}
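
// Debug statistic: count cache entries that are currently unused
// (start_addr == 0).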
int32_t jit_unused_cache_stat()
{
    int32_t count = 0;

    for(int32_t i = 0; i < JIT_CACHE_ARRAY_SIZE; i++)
    {
        struct code_cache* entry = &jit_cache_arr[i];
        int32_t phys_addr = entry->start_addr;

        if(phys_addr == 0)
        {
            count++;
        }
    }

    return count;
}
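
// Accessors for inspecting individual cache entries; the entry length is only
// tracked in DEBUG builds.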
int32_t jit_get_entry_length(int32_t i)
{
    assert(i >= 0 && i < JIT_CACHE_ARRAY_SIZE);
#if DEBUG
    return jit_cache_arr[i].len;
#else
    UNUSED(i);
    return 0;
#endif
}

int32_t jit_get_entry_address(int32_t i)
{
    assert(i >= 0 && i < JIT_CACHE_ARRAY_SIZE);
    return jit_cache_arr[i].start_addr;
}

int32_t jit_get_entry_pending(int32_t i)
{
    assert(i >= 0 && i < JIT_CACHE_ARRAY_SIZE);
    return jit_cache_arr[i].pending;
}