modrm.rs

use cpu_context::CpuContext;
use jit::JitContext;
use prefix::{PREFIX_MASK_SEGMENT, SEG_PREFIX_ZERO};
use regs::{BP, BX, DI, SI};
use regs::{DS, SS};
use regs::{EAX, EBP, EBX, ECX, EDI, EDX, ESI, ESP};
use wasmgen::wasm_util;
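
// Advance the decoder past the memory operand described by modrm_byte:
// the SIB byte (in 32-bit address-size mode) and any displacement.
// No wasm is emitted; only the instruction stream position is advanced.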
pub fn skip(ctx: &mut CpuContext, modrm_byte: u8) {
    if ctx.asize_32() {
        skip32(ctx, modrm_byte)
    }
    else {
        skip16(ctx, modrm_byte)
    }
}
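
// 16-bit addressing: mod=0 has no displacement, except rm=6 which is a
// direct disp16; mod=1 has a disp8; mod=2 has a disp16.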
fn skip16(ctx: &mut CpuContext, modrm_byte: u8) {
    assert!(modrm_byte < 0xC0);
    let r = modrm_byte & 7;
    if modrm_byte < 0x40 {
        if r == 6 {
            ctx.advance16()
        }
    }
    else if modrm_byte < 0x80 {
        ctx.advance8()
    }
    else {
        ctx.advance16()
    }
}
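
// 32-bit addressing: rm=4 means a SIB byte follows (a SIB base of 5 with
// mod=0 adds a disp32); rm=5 with mod=0 is a direct disp32; otherwise
// mod=1 adds a disp8 and mod=2 a disp32.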
fn skip32(ctx: &mut CpuContext, modrm_byte: u8) {
    assert!(modrm_byte < 0xC0);
    let r = modrm_byte & 7;
    if r == 4 {
        let sib = ctx.read_imm8();
        if modrm_byte < 0x40 {
            if sib & 7 == 5 {
                ctx.advance32()
            }
        }
        else if modrm_byte < 0x80 {
            ctx.advance8()
        }
        else {
            ctx.advance32()
        }
    }
    else if r == 5 && modrm_byte < 0x40 {
        ctx.advance32();
    }
    else {
        if modrm_byte < 0x40 {
            // Nothing
        }
        else if modrm_byte < 0x80 {
            ctx.advance8()
        }
        else {
            ctx.advance32()
        }
    }
}
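
// Generate wasm that leaves the operand's effective address (offset plus
// segment base, unless the base is known to be zero) on the value stack.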
pub fn gen(ctx: &mut JitContext, modrm_byte: u8) {
    if ctx.cpu.asize_32() {
        gen32(ctx, modrm_byte)
    }
    else {
        gen16(ctx, modrm_byte)
    }
}
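
// A 16-bit memory operand is the sum of zero, one or two 16-bit registers
// (Offset16) and an optional displacement (Imm16), truncated to 16 bits.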
enum Imm16 {
    None,
    Imm8,
    Imm16,
}

enum Offset16 {
    Zero,
    One(u32),
    Two(u32, u32),
}

fn gen16_case(ctx: &mut JitContext, seg: u32, offset: Offset16, imm: Imm16) {
    // Generates one of:
    // - add_segment(reg)
    // - add_segment(imm)
    // - add_segment(reg1 + reg2 & 0xFFFF)
    // - add_segment(reg1 + imm & 0xFFFF)
    // - add_segment(reg1 + reg2 + imm & 0xFFFF)
    let immediate_value = match imm {
        Imm16::None => 0,
        Imm16::Imm8 => ctx.cpu.read_imm8s() as i32,
        Imm16::Imm16 => ctx.cpu.read_imm16s() as i32,
    };
    match offset {
        Offset16::Zero => {
            wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value & 0xFFFF);
        },
        Offset16::One(r) => {
            wasm_util::load_aligned_u16(
                &mut ctx.builder.instruction_body,
                ::global_pointers::get_reg16_offset(r),
            );
            if immediate_value != 0 {
                wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value);
                wasm_util::add_i32(&mut ctx.builder.instruction_body);
                wasm_util::push_i32(&mut ctx.builder.instruction_body, 0xFFFF);
                wasm_util::and_i32(&mut ctx.builder.instruction_body);
            }
        },
        Offset16::Two(r1, r2) => {
            wasm_util::load_aligned_u16(
                &mut ctx.builder.instruction_body,
                ::global_pointers::get_reg16_offset(r1),
            );
            wasm_util::load_aligned_u16(
                &mut ctx.builder.instruction_body,
                ::global_pointers::get_reg16_offset(r2),
            );
            wasm_util::add_i32(&mut ctx.builder.instruction_body);
            if immediate_value != 0 {
                wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value);
                wasm_util::add_i32(&mut ctx.builder.instruction_body);
            }
            wasm_util::push_i32(&mut ctx.builder.instruction_body, 0xFFFF);
            wasm_util::and_i32(&mut ctx.builder.instruction_body);
        },
    }
    jit_add_seg_offset(ctx, seg);
}
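
// The reg field (bits 3..5) does not affect the address computation, so it is
// masked off; the octal patterns below group the byte as mod/reg/rm.
// Example: 0o102 is mod=1, rm=2, i.e. [bp + si + disp8] with SS as the
// default segment.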
fn gen16(ctx: &mut JitContext, modrm_byte: u8) {
    match modrm_byte & !0o070 {
        0o000 => gen16_case(ctx, DS, Offset16::Two(BX, SI), Imm16::None),
        0o001 => gen16_case(ctx, DS, Offset16::Two(BX, DI), Imm16::None),
        0o002 => gen16_case(ctx, SS, Offset16::Two(BP, SI), Imm16::None),
        0o003 => gen16_case(ctx, SS, Offset16::Two(BP, DI), Imm16::None),
        0o004 => gen16_case(ctx, DS, Offset16::One(SI), Imm16::None),
        0o005 => gen16_case(ctx, DS, Offset16::One(DI), Imm16::None),
        0o006 => gen16_case(ctx, DS, Offset16::Zero, Imm16::Imm16),
        0o007 => gen16_case(ctx, DS, Offset16::One(BX), Imm16::None),
        0o100 => gen16_case(ctx, DS, Offset16::Two(BX, SI), Imm16::Imm8),
        0o101 => gen16_case(ctx, DS, Offset16::Two(BX, DI), Imm16::Imm8),
        0o102 => gen16_case(ctx, SS, Offset16::Two(BP, SI), Imm16::Imm8),
        0o103 => gen16_case(ctx, SS, Offset16::Two(BP, DI), Imm16::Imm8),
        0o104 => gen16_case(ctx, DS, Offset16::One(SI), Imm16::Imm8),
        0o105 => gen16_case(ctx, DS, Offset16::One(DI), Imm16::Imm8),
        0o106 => gen16_case(ctx, SS, Offset16::One(BP), Imm16::Imm8),
        0o107 => gen16_case(ctx, DS, Offset16::One(BX), Imm16::Imm8),
        0o200 => gen16_case(ctx, DS, Offset16::Two(BX, SI), Imm16::Imm16),
        0o201 => gen16_case(ctx, DS, Offset16::Two(BX, DI), Imm16::Imm16),
        0o202 => gen16_case(ctx, SS, Offset16::Two(BP, SI), Imm16::Imm16),
        0o203 => gen16_case(ctx, SS, Offset16::Two(BP, DI), Imm16::Imm16),
        0o204 => gen16_case(ctx, DS, Offset16::One(SI), Imm16::Imm16),
        0o205 => gen16_case(ctx, DS, Offset16::One(DI), Imm16::Imm16),
        0o206 => gen16_case(ctx, SS, Offset16::One(BP), Imm16::Imm16),
        0o207 => gen16_case(ctx, DS, Offset16::One(BX), Imm16::Imm16),
        _ => assert!(false),
    }
}
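
// A 32-bit memory operand is a base register, a SIB expression, or nothing
// (Offset), plus an optional 8- or 32-bit displacement (Imm32).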
#[derive(PartialEq)]
enum Imm32 {
    None,
    Imm8,
    Imm32,
}

enum Offset {
    Reg(u32),
    Sib,
    None,
}
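
// Emits the offset (base or SIB result plus displacement) and adds the
// segment base; for SIB operands, gen_sib adds the segment base itself.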
fn gen32_case(ctx: &mut JitContext, seg: u32, offset: Offset, imm: Imm32) {
    match offset {
        Offset::Sib => {
            gen_sib(ctx, imm != Imm32::None);
            let immediate_value = match imm {
                Imm32::None => 0,
                Imm32::Imm8 => ctx.cpu.read_imm8s() as i32,
                Imm32::Imm32 => ctx.cpu.read_imm32() as i32,
            };
            if immediate_value != 0 {
                wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value);
                wasm_util::add_i32(&mut ctx.builder.instruction_body);
            }
        },
        Offset::Reg(r) => {
            let immediate_value = match imm {
                Imm32::None => 0,
                Imm32::Imm8 => ctx.cpu.read_imm8s() as i32,
                Imm32::Imm32 => ctx.cpu.read_imm32() as i32,
            };
            wasm_util::load_aligned_i32(
                &mut ctx.builder.instruction_body,
                ::global_pointers::get_reg32_offset(r),
            );
            if immediate_value != 0 {
                wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value);
                wasm_util::add_i32(&mut ctx.builder.instruction_body);
            }
            jit_add_seg_offset(ctx, seg);
        },
        Offset::None => {
            let immediate_value = match imm {
                Imm32::None => 0,
                Imm32::Imm8 => ctx.cpu.read_imm8s() as i32,
                Imm32::Imm32 => ctx.cpu.read_imm32() as i32,
            };
            wasm_util::push_i32(&mut ctx.builder.instruction_body, immediate_value);
            jit_add_seg_offset(ctx, seg);
        },
    }
}
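
// Same dispatch as gen16, masking off the reg field: rm=4 selects a SIB byte,
// rm=5 with mod=0 is a plain disp32, and EBP-based forms default to SS.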
fn gen32(ctx: &mut JitContext, modrm_byte: u8) {
    match modrm_byte & !0o070 {
        0o000 => gen32_case(ctx, DS, Offset::Reg(EAX), Imm32::None),
        0o001 => gen32_case(ctx, DS, Offset::Reg(ECX), Imm32::None),
        0o002 => gen32_case(ctx, DS, Offset::Reg(EDX), Imm32::None),
        0o003 => gen32_case(ctx, DS, Offset::Reg(EBX), Imm32::None),
        0o004 => gen32_case(ctx, DS, Offset::Sib, Imm32::None),
        0o005 => gen32_case(ctx, DS, Offset::None, Imm32::Imm32),
        0o006 => gen32_case(ctx, DS, Offset::Reg(ESI), Imm32::None),
        0o007 => gen32_case(ctx, DS, Offset::Reg(EDI), Imm32::None),
        0o100 => gen32_case(ctx, DS, Offset::Reg(EAX), Imm32::Imm8),
        0o101 => gen32_case(ctx, DS, Offset::Reg(ECX), Imm32::Imm8),
        0o102 => gen32_case(ctx, DS, Offset::Reg(EDX), Imm32::Imm8),
        0o103 => gen32_case(ctx, DS, Offset::Reg(EBX), Imm32::Imm8),
        0o104 => gen32_case(ctx, DS, Offset::Sib, Imm32::Imm8),
        0o105 => gen32_case(ctx, SS, Offset::Reg(EBP), Imm32::Imm8),
        0o106 => gen32_case(ctx, DS, Offset::Reg(ESI), Imm32::Imm8),
        0o107 => gen32_case(ctx, DS, Offset::Reg(EDI), Imm32::Imm8),
        0o200 => gen32_case(ctx, DS, Offset::Reg(EAX), Imm32::Imm32),
        0o201 => gen32_case(ctx, DS, Offset::Reg(ECX), Imm32::Imm32),
        0o202 => gen32_case(ctx, DS, Offset::Reg(EDX), Imm32::Imm32),
        0o203 => gen32_case(ctx, DS, Offset::Reg(EBX), Imm32::Imm32),
        0o204 => gen32_case(ctx, DS, Offset::Sib, Imm32::Imm32),
        0o205 => gen32_case(ctx, SS, Offset::Reg(EBP), Imm32::Imm32),
        0o206 => gen32_case(ctx, DS, Offset::Reg(ESI), Imm32::Imm32),
        0o207 => gen32_case(ctx, DS, Offset::Reg(EDI), Imm32::Imm32),
        _ => assert!(false),
    }
}
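
// Decode the SIB byte and emit code for segment base + base register (or
// disp32 when base=5 and mod=0) + optionally a scaled index register.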
fn gen_sib(ctx: &mut JitContext, mod_is_nonzero: bool) {
    let sib_byte = ctx.cpu.read_imm8();
    let r = sib_byte & 7; // base field
    let m = sib_byte >> 3 & 7; // index field
    let seg;
    // Generates: get_seg_prefix(seg) + base
    // Where base is a register or constant
    if r == 4 {
        seg = SS;
        let base_addr = ::global_pointers::get_reg32_offset(ESP);
        wasm_util::load_aligned_i32(&mut ctx.builder.instruction_body, base_addr);
    }
    else if r == 5 {
        if mod_is_nonzero {
            seg = SS;
            let base_addr = ::global_pointers::get_reg32_offset(EBP);
            wasm_util::load_aligned_i32(&mut ctx.builder.instruction_body, base_addr);
        }
        else {
            seg = DS;
            let base = ctx.cpu.read_imm32();
            wasm_util::push_i32(&mut ctx.builder.instruction_body, base as i32);
        }
    }
    else {
        seg = DS;
        let base_addr = ::global_pointers::get_reg32_offset(r as u32);
        wasm_util::load_aligned_i32(&mut ctx.builder.instruction_body, base_addr);
    }
    jit_add_seg_offset(ctx, seg);
    // We now have to generate an offset value to add
    if m == 4 {
        // offset is 0, we don't need to add anything
        return;
    }
    // Offset is reg32s[m] << s, where s is:
    let s = sib_byte >> 6 & 3;
    wasm_util::load_aligned_i32(
        &mut ctx.builder.instruction_body,
        ::global_pointers::get_reg32_offset(m as u32),
    );
    wasm_util::push_i32(&mut ctx.builder.instruction_body, s as i32);
    wasm_util::shl_i32(&mut ctx.builder.instruction_body);
    wasm_util::add_i32(&mut ctx.builder.instruction_body);
}
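
// The segment base add can be skipped when DS and SS are known to be flat
// (base 0).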
fn can_optimize_get_seg(ctx: &mut JitContext, segment: u32) -> bool {
    (segment == DS || segment == SS) && ctx.cpu.has_flat_segmentation()
}
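
// Add the base of the effective segment (a prefix override if present,
// otherwise default_segment) to the address on the stack. Nothing is emitted
// when the base is known to be zero: flat DS/SS, or the SEG_PREFIX_ZERO
// override.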
fn jit_add_seg_offset(ctx: &mut JitContext, default_segment: u32) {
    let prefix = ctx.cpu.prefixes & PREFIX_MASK_SEGMENT;
    let seg = if prefix != 0 {
        prefix - 1
    }
    else {
        default_segment
    };
    if can_optimize_get_seg(ctx, seg) || prefix == SEG_PREFIX_ZERO {
        return;
    }
    wasm_util::push_i32(&mut ctx.builder.instruction_body, seg as i32);
    wasm_util::call_fn(&mut ctx.builder.instruction_body, ::jit::FN_GET_SEG_IDX);
    wasm_util::add_i32(&mut ctx.builder.instruction_body);
}