memory.c

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include "const.h"
#include "global_pointers.h"
#include "js_imports.h"
#include "log.h"
#include "memory.h"
#include "profiler/profiler.h"
#include "rust_imports.h"

bool in_mapped_range(uint32_t addr)
{
    return (addr >= 0xA0000 && addr < 0xC0000) || addr >= *memory_size;
}

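// When the A20 gate is disabled, address bit 20 is masked off so that
// accesses wrap at 1 MiB, as on the original 8086. A20_MASK clears that bit
// for byte addresses; A20_MASK16 and A20_MASK32 are presumably the same mask
// scaled down for the 16- and 32-bit element indices used by the aligned
// accessors below.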
int32_t read8(uint32_t addr)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        return mmap_read8(addr);
    }
    else
    {
        return mem8[addr];
    }
}

int32_t read16(uint32_t addr)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        return mmap_read16(addr);
    }
    else
    {
        return *(uint16_t*)(mem8 + addr);
    }
}

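// Aligned read: addr is an index into 16-bit words (byte address >> 1), so
// it is shifted back up by one bit before the mapped-range check.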
int32_t read_aligned16(uint32_t addr)
{
    dbg_assert(addr < 0x80000000);
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK16;

    if(in_mapped_range(addr << 1))
    {
        return mmap_read16(addr << 1);
    }
    else
    {
        return mem16[addr];
    }
}

int32_t read32s(uint32_t addr)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        return mmap_read32(addr);
    }
    else
    {
        return *(int32_t*)(mem8 + addr);
    }
}

int64_t read64s(uint32_t addr)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        // Cast the low dword through uint32_t so its sign bit is not
        // extended into the high half before the two halves are combined.
        return (int64_t)(uint32_t)mmap_read32(addr) | (int64_t)mmap_read32(addr + 4) << 32;
    }
    else
    {
        return *(int64_t*)(mem8 + addr);
    }
}

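// Aligned read: addr is an index into 32-bit dwords (byte address >> 2).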
int32_t read_aligned32(uint32_t addr)
{
    dbg_assert(addr < 0x40000000);
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK32;

    if(in_mapped_range(addr << 2))
    {
        return mmap_read32(addr << 2);
    }
    else
    {
        return mem32s[addr];
    }
}

union reg128 read128(uint32_t addr)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    union reg128 value = { { 0 } };

    if(in_mapped_range(addr))
    {
        value.i32[0] = mmap_read32(addr);
        value.i32[1] = mmap_read32(addr + 4);
        value.i32[2] = mmap_read32(addr + 8);
        value.i32[3] = mmap_read32(addr + 12);
    }
    else
    {
        value.i64[0] = *(int64_t*)(mem8 + addr);
        value.i64[1] = *(int64_t*)(mem8 + addr + 8);
    }
    return value;
}

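// Writes to plain RAM must invalidate any JIT-compiled code covering the
// written bytes, hence the jit_dirty_cache_* calls on the non-mapped path.
// Writes that fall into the mapped range go through the mmap_* handlers.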
void write8(uint32_t addr, int32_t value)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        mmap_write8(addr, value);
    }
    else
    {
        jit_dirty_cache_single(addr);
        mem8[addr] = value;
    }
}

void write16(uint32_t addr, int32_t value)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        mmap_write16(addr, value);
    }
    else
    {
        jit_dirty_cache_small(addr, addr + 2);
        *(uint16_t*)(mem8 + addr) = value;
    }
}

void write_aligned16(uint32_t addr, uint32_t value)
{
    dbg_assert(addr < 0x80000000);
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK16;

    uint32_t phys_addr = addr << 1;
    if(in_mapped_range(phys_addr))
    {
        mmap_write16(phys_addr, value);
    }
    else
    {
        jit_dirty_cache_small(phys_addr, phys_addr + 2);
        mem16[addr] = value;
    }
}

void write32(uint32_t addr, int32_t value)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        mmap_write32(addr, value);
    }
    else
    {
        jit_dirty_cache_small(addr, addr + 4);
        *(int32_t*)(mem8 + addr) = value;
    }
}

void write_aligned32(uint32_t addr, int32_t value)
{
    dbg_assert(addr < 0x40000000);
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK32;

    uint32_t phys_addr = addr << 2;
    if(in_mapped_range(phys_addr))
    {
        mmap_write32(phys_addr, value);
    }
    else
    {
        jit_dirty_cache_small(phys_addr, phys_addr + 4);
        mem32s[addr] = value;
    }
}

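// There is no 64-bit MMIO primitive here, so the mapped path splits the
// value into two 32-bit writes, low dword first.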
void write64(uint32_t addr, int64_t value)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        mmap_write32(addr + 0, value & 0xFFFFFFFF);
        mmap_write32(addr + 4, value >> 32);
    }
    else
    {
        jit_dirty_cache_small(addr, addr + 8);
        *(int64_t*)(mem8 + addr) = value;
    }
}

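// 128-bit store: the mapped path hands all four dwords to mmap_write128 in a
// single call, while the RAM path writes the two 64-bit halves directly.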
void write128(uint32_t addr, union reg128 value)
{
    if(USE_A20 && !*a20_enabled) addr &= A20_MASK;

    if(in_mapped_range(addr))
    {
        mmap_write128(addr, value.i32[0], value.i32[1], value.i32[2], value.i32[3]);
    }
    else
    {
        jit_dirty_cache_small(addr, addr + 16);
        *(int64_t*)(mem8 + addr) = value.i64[0];
        *(int64_t*)(mem8 + addr + 8) = value.i64[1];
    }
}