// memory.rs — guest physical memory access helpers (reads/writes with
// memory-mapped-I/O dispatch and jit dirty-page tracking).
// Host-side handlers for memory-mapped I/O. Accesses whose address falls in a
// mapped range (see `in_mapped_range`) are forwarded to these functions
// instead of touching the linear guest-memory buffer directly.
// NOTE(review): `#[no_mangle]` on foreign declarations is unusual — imported
// `extern` symbols are not mangled anyway; confirm these attributes are
// intentional rather than scaffolding from a code generator.
extern "C" {
    #[no_mangle]
    fn mmap_read8(addr: u32) -> i32;
    #[no_mangle]
    fn mmap_read16(addr: u32) -> i32;
    #[no_mangle]
    fn mmap_read32(addr: u32) -> i32;
    #[no_mangle]
    pub fn mmap_write8(addr: u32, value: i32);
    #[no_mangle]
    pub fn mmap_write16(addr: u32, value: i32);
    #[no_mangle]
    pub fn mmap_write32(addr: u32, value: i32);
    // 64/128-bit writes are passed as 32-bit chunks, low dword first.
    #[no_mangle]
    pub fn mmap_write64(addr: u32, v0: i32, v1: i32);
    #[no_mangle]
    pub fn mmap_write128(addr: u32, v0: i32, v1: i32, v2: i32, v3: i32);
}
  19. use cpu::cpu::{mem8, reg128};
  20. use cpu::global_pointers::memory_size;
  21. use page::Page;
  22. use std::ptr;
  23. #[no_mangle]
  24. pub fn in_mapped_range(addr: u32) -> bool {
  25. return addr >= 0xA0000 && addr < 0xC0000 || addr >= unsafe { *memory_size };
  26. }
  27. #[no_mangle]
  28. pub fn read8(addr: u32) -> i32 {
  29. if in_mapped_range(addr) {
  30. return unsafe { mmap_read8(addr) };
  31. }
  32. else {
  33. return read8_no_mmap_check(addr);
  34. };
  35. }
  36. pub fn read8_no_mmap_check(addr: u32) -> i32 { unsafe { *mem8.offset(addr as isize) as i32 } }
  37. #[no_mangle]
  38. pub fn read16(addr: u32) -> i32 {
  39. if in_mapped_range(addr) {
  40. return unsafe { mmap_read16(addr) };
  41. }
  42. else {
  43. return read16_no_mmap_check(addr);
  44. };
  45. }
  46. pub fn read16_no_mmap_check(addr: u32) -> i32 {
  47. unsafe { *(mem8.offset(addr as isize) as *mut u16) as i32 }
  48. }
  49. #[no_mangle]
  50. pub fn read32s(addr: u32) -> i32 {
  51. if in_mapped_range(addr) {
  52. return unsafe { mmap_read32(addr) };
  53. }
  54. else {
  55. return read32_no_mmap_check(addr);
  56. };
  57. }
  58. pub fn read32_no_mmap_check(addr: u32) -> i32 {
  59. unsafe { *(mem8.offset(addr as isize) as *mut i32) }
  60. }
  61. #[no_mangle]
  62. pub unsafe fn read64s(addr: u32) -> i64 {
  63. if in_mapped_range(addr) {
  64. return mmap_read32(addr) as i64 | (mmap_read32(addr.wrapping_add(4 as u32)) as i64) << 32;
  65. }
  66. else {
  67. return *(mem8.offset(addr as isize) as *mut i64);
  68. };
  69. }
  70. #[no_mangle]
  71. pub unsafe fn read_aligned32(addr: u32) -> i32 {
  72. dbg_assert!(addr < 0x40000000 as u32);
  73. if in_mapped_range(addr << 2) {
  74. return mmap_read32(addr << 2);
  75. }
  76. else {
  77. return *(mem8 as *mut i32).offset(addr as isize);
  78. };
  79. }
  80. #[no_mangle]
  81. pub unsafe fn read128(addr: u32) -> reg128 {
  82. let mut value: reg128 = reg128 {
  83. i8_0: [0 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
  84. };
  85. if in_mapped_range(addr) {
  86. value.i32_0[0] = mmap_read32(addr);
  87. value.i32_0[1] = mmap_read32(addr.wrapping_add(4 as u32));
  88. value.i32_0[2] = mmap_read32(addr.wrapping_add(8 as u32));
  89. value.i32_0[3] = mmap_read32(addr.wrapping_add(12 as u32))
  90. }
  91. else {
  92. value.i64_0[0] = *(mem8.offset(addr as isize) as *mut i64);
  93. value.i64_0[1] = *(mem8.offset(addr as isize).offset(8) as *mut i64)
  94. }
  95. return value;
  96. }
  97. #[no_mangle]
  98. pub unsafe fn write8(addr: u32, value: i32) {
  99. if in_mapped_range(addr) {
  100. mmap_write8(addr, value);
  101. }
  102. else {
  103. ::jit::jit_dirty_page(::jit::get_jit_state(), Page::page_of(addr));
  104. write8_no_mmap_or_dirty_check(addr, value);
  105. };
  106. }
  107. pub unsafe fn write8_no_mmap_or_dirty_check(addr: u32, value: i32) {
  108. *mem8.offset(addr as isize) = value as u8
  109. }
  110. #[no_mangle]
  111. pub unsafe fn write16(addr: u32, value: i32) {
  112. if in_mapped_range(addr) {
  113. mmap_write16(addr, value);
  114. }
  115. else {
  116. ::jit::jit_dirty_cache_small(addr, addr.wrapping_add(2 as u32));
  117. write16_no_mmap_or_dirty_check(addr, value);
  118. };
  119. }
  120. pub unsafe fn write16_no_mmap_or_dirty_check(addr: u32, value: i32) {
  121. *(mem8.offset(addr as isize) as *mut u16) = value as u16
  122. }
  123. #[no_mangle]
  124. pub unsafe fn write32(addr: u32, value: i32) {
  125. if in_mapped_range(addr) {
  126. mmap_write32(addr, value);
  127. }
  128. else {
  129. ::jit::jit_dirty_cache_small(addr, addr.wrapping_add(4 as u32));
  130. write32_no_mmap_or_dirty_check(addr, value);
  131. };
  132. }
  133. pub unsafe fn write32_no_mmap_or_dirty_check(addr: u32, value: i32) {
  134. *(mem8.offset(addr as isize) as *mut i32) = value
  135. }
  136. pub unsafe fn write_aligned32_no_mmap_or_dirty_check(addr: u32, value: i32) {
  137. *(mem8 as *mut i32).offset(addr as isize) = value
  138. }
  139. #[no_mangle]
  140. pub unsafe fn write_aligned32(addr: u32, value: i32) {
  141. dbg_assert!(addr < 0x40000000 as u32);
  142. let phys_addr = addr << 2;
  143. if in_mapped_range(phys_addr) {
  144. mmap_write32(phys_addr, value);
  145. }
  146. else {
  147. ::jit::jit_dirty_cache_small(phys_addr, phys_addr.wrapping_add(4 as u32));
  148. write_aligned32_no_mmap_or_dirty_check(addr, value);
  149. };
  150. }
  151. #[no_mangle]
  152. pub unsafe fn write64(addr: u32, value: u64) {
  153. if in_mapped_range(addr) {
  154. mmap_write64(addr, value as i32, (value >> 32) as i32);
  155. }
  156. else {
  157. ::jit::jit_dirty_cache_small(addr, addr.wrapping_add(8 as u32));
  158. write64_no_mmap_or_dirty_check(addr, value);
  159. };
  160. }
  161. pub unsafe fn write64_no_mmap_or_dirty_check(addr: u32, value: u64) {
  162. *(mem8.offset(addr as isize) as *mut u64) = value
  163. }
  164. #[no_mangle]
  165. pub unsafe fn write128(addr: u32, value: reg128) {
  166. if in_mapped_range(addr) {
  167. mmap_write128(
  168. addr,
  169. value.i32_0[0],
  170. value.i32_0[1],
  171. value.i32_0[2],
  172. value.i32_0[3],
  173. );
  174. }
  175. else {
  176. ::jit::jit_dirty_cache_small(addr, addr.wrapping_add(16 as u32));
  177. write128_no_mmap_or_dirty_check(addr, value);
  178. };
  179. }
  180. pub unsafe fn write128_no_mmap_or_dirty_check(addr: u32, value: reg128) {
  181. *(mem8.offset(addr as isize) as *mut reg128) = value
  182. }
  183. pub unsafe fn memset_no_mmap_or_dirty_check(addr: u32, value: u8, count: u32) {
  184. ptr::write_bytes(mem8.offset(addr as isize), value, count as usize);
  185. }
  186. pub unsafe fn memcpy_no_mmap_or_dirty_check(src_addr: u32, dst_addr: u32, count: u32) {
  187. dbg_assert!(u32::max(src_addr, dst_addr) - u32::min(src_addr, dst_addr) >= count);
  188. ptr::copy_nonoverlapping(
  189. mem8.offset(src_addr as isize),
  190. mem8.offset(dst_addr as isize),
  191. count as usize,
  192. )
  193. }