use cpu::cpu::*;
use cpu::fpu::{
    fpu_load_m80, fpu_load_status_word, fpu_set_status_word, fpu_store_m80, set_control_word,
};
use cpu::global_pointers::*;
use paging::OrPageFault;
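// Arithmetic flags are computed lazily: arithmetic instructions record their
// inputs in last_op1/last_result/last_op_size and mark the affected flags in
// flags_changed (whose sign bit indicates a subtraction). The getters below
// reconstruct a flag on demand and fall back to the materialized bit in
// *flags when the flag is not marked as changed.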
pub unsafe fn getcf() -> bool {
    if 0 != *flags_changed & 1 {
        let m = (2 << *last_op_size) - 1;
        dbg_assert!((*last_op1 as u32) <= m);
        dbg_assert!((*last_result as u32) <= m);
        let sub_mask = *flags_changed >> 31;
        // sub: last_op1 < last_result (or last_op1 < last_op2) (or (result ^ ((result ^ b) & (b ^ a))))
        // add: last_result < last_op1 (or last_result < last_op2) (or a ^ ((a ^ b) & (b ^ result)))
        return ((*last_result as i32 ^ sub_mask) as u32) < (*last_op1 ^ sub_mask) as u32;
    }
    else {
        return 0 != *flags & 1;
    };
}
#[no_mangle]
pub unsafe fn getpf() -> bool {
    if 0 != *flags_changed & FLAG_PARITY {
        // inverted lookup table: bit i of 0x9669 is set iff the 4-bit value i
        // has an even number of set bits; the low byte of the result is folded
        // to 4 bits by xor, and the shift by 2 places the looked-up bit at
        // FLAG_PARITY
        return 0 != 0x9669 << 2 >> ((*last_result ^ *last_result >> 4) & 15) & FLAG_PARITY;
    }
    else {
        return 0 != *flags & FLAG_PARITY;
    };
}
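// AF: recover the second operand (op2 = result - op1 for add, op1 - result
// for sub); the xor of both operands and the result at bit 4 (FLAG_ADJUST)
// is the carry out of the low nibble.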
pub unsafe fn getaf() -> bool {
    if 0 != *flags_changed & FLAG_ADJUST {
        let is_sub = *flags_changed & FLAG_SUB != 0;
        let last_op2 = (*last_result - *last_op1) * if is_sub { -1 } else { 1 };
        return 0 != (*last_op1 ^ last_op2 ^ *last_result) & FLAG_ADJUST;
    }
    else {
        return 0 != *flags & FLAG_ADJUST;
    };
}
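// ZF: the borrow of result - 1 propagates past bit last_op_size only when
// the result is zero, so bit last_op_size of !result & (result - 1) is ZF.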
pub unsafe fn getzf() -> bool {
    if 0 != *flags_changed & FLAG_ZERO {
        return 0 != (!*last_result & *last_result - 1) >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_ZERO;
    };
}
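// SF is the top bit of the result (bit last_op_size).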
pub unsafe fn getsf() -> bool {
    if 0 != *flags_changed & FLAG_SIGN {
        return 0 != *last_result >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_SIGN;
    };
}
pub unsafe fn getof() -> bool {
    if 0 != *flags_changed & FLAG_OVERFLOW {
        let is_sub = (*flags_changed as u32) >> 31;
        // add: (a ^ result) & (b ^ result)
        // sub: (a ^ result) & (b ^ result ^ 1) (or (a ^ b) & (result ^ a))
        let b_xor_1_if_sub = (*last_result - *last_op1) - is_sub as i32;
        return 0
            != ((*last_op1 ^ *last_result) & (b_xor_1_if_sub ^ *last_result)) >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_OVERFLOW;
    };
}
pub unsafe fn test_o() -> bool { return getof(); }
pub unsafe fn test_b() -> bool { return getcf(); }
pub unsafe fn test_z() -> bool { return getzf(); }
pub unsafe fn test_s() -> bool { return getsf(); }
#[no_mangle]
pub unsafe fn test_p() -> bool { return getpf(); }
pub unsafe fn test_be() -> bool { return getcf() || getzf(); }
pub unsafe fn test_l() -> bool { return getsf() != getof(); }
pub unsafe fn test_le() -> bool { return getzf() || getsf() != getof(); }
pub unsafe fn test_no() -> bool { return !test_o(); }
pub unsafe fn test_nb() -> bool { return !test_b(); }
pub unsafe fn test_nz() -> bool { return !test_z(); }
pub unsafe fn test_ns() -> bool { return !test_s(); }
#[no_mangle]
pub unsafe fn test_np() -> bool { return !test_p(); }
pub unsafe fn test_nbe() -> bool { return !test_be(); }
pub unsafe fn test_nl() -> bool { return !test_l(); }
pub unsafe fn test_nle() -> bool { return !test_le(); }
#[no_mangle]
pub unsafe fn jmp_rel16(rel16: i32) {
    let cs_offset = get_seg_cs();
    // limit ip to 16 bit
    *instruction_pointer = cs_offset + (*instruction_pointer - cs_offset + rel16 & 0xFFFF);
}
#[no_mangle]
pub unsafe fn jmpcc16(condition: bool, imm16: i32) {
    if condition {
        jmp_rel16(imm16);
    };
}
#[no_mangle]
pub unsafe fn jmpcc32(condition: bool, imm32: i32) {
    if condition {
        *instruction_pointer += imm32
    };
}
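// loop/loope/loopne decrement CX or ECX (per address size) and branch while
// the counter is non-zero; loope additionally requires ZF, loopne requires
// !ZF. jcxz branches when the counter is zero, without decrementing it.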
#[no_mangle]
pub unsafe fn loope16(imm8s: i32) { jmpcc16(0 != decr_ecx_asize(is_asize_32()) && getzf(), imm8s); }
#[no_mangle]
pub unsafe fn loopne16(imm8s: i32) {
    jmpcc16(0 != decr_ecx_asize(is_asize_32()) && !getzf(), imm8s);
}
#[no_mangle]
pub unsafe fn loop16(imm8s: i32) { jmpcc16(0 != decr_ecx_asize(is_asize_32()), imm8s); }
#[no_mangle]
pub unsafe fn jcxz16(imm8s: i32) { jmpcc16(get_reg_asize(ECX) == 0, imm8s); }
#[no_mangle]
pub unsafe fn loope32(imm8s: i32) { jmpcc32(0 != decr_ecx_asize(is_asize_32()) && getzf(), imm8s); }
#[no_mangle]
pub unsafe fn loopne32(imm8s: i32) {
    jmpcc32(0 != decr_ecx_asize(is_asize_32()) && !getzf(), imm8s);
}
#[no_mangle]
pub unsafe fn loop32(imm8s: i32) { jmpcc32(0 != decr_ecx_asize(is_asize_32()), imm8s); }
#[no_mangle]
pub unsafe fn jcxz32(imm8s: i32) { jmpcc32(get_reg_asize(ECX) == 0, imm8s); }
pub unsafe fn cmovcc16(condition: bool, value: i32, r: i32) {
    if condition {
        write_reg16(r, value);
    };
}
pub unsafe fn cmovcc32(condition: bool, value: i32, r: i32) {
    if condition {
        write_reg32(r, value);
    };
}
#[no_mangle]
pub unsafe fn get_stack_pointer(offset: i32) -> i32 {
    if *stack_size_32 {
        return get_seg_ss() + read_reg32(ESP) + offset;
    }
    else {
        return get_seg_ss() + (read_reg16(SP) + offset & 0xFFFF);
    };
}
#[no_mangle]
pub unsafe fn adjust_stack_reg(adjustment: i32) {
    if *stack_size_32 {
        write_reg32(ESP, read_reg32(ESP) + adjustment);
    }
    else {
        write_reg16(SP, read_reg16(SP) + adjustment);
    };
}
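// push/pop come in _ss16/_ss32 variants selected by the current stack
// segment size (*stack_size_32); the _ss16 variants wrap SP at 16 bits.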
#[no_mangle]
pub unsafe fn push16_ss16(imm16: i32) -> OrPageFault<()> {
    let sp = get_seg_ss() + (read_reg16(SP) - 2 & 0xFFFF);
    safe_write16(sp, imm16)?;
    write_reg16(SP, read_reg16(SP) - 2);
    Ok(())
}
#[no_mangle]
pub unsafe fn push16_ss32(imm16: i32) -> OrPageFault<()> {
    let sp = get_seg_ss() + read_reg32(ESP) - 2;
    safe_write16(sp, imm16)?;
    write_reg32(ESP, read_reg32(ESP) - 2);
    Ok(())
}
#[no_mangle]
pub unsafe fn push16_ss16_mem(addr: i32) -> OrPageFault<()> { push16_ss16(safe_read16(addr)?) }
#[no_mangle]
pub unsafe fn push16_ss32_mem(addr: i32) -> OrPageFault<()> { push16_ss32(safe_read16(addr)?) }
#[no_mangle]
pub unsafe fn push16(imm16: i32) -> OrPageFault<()> {
    if *stack_size_32 { push16_ss32(imm16) } else { push16_ss16(imm16) }
}
#[no_mangle]
pub unsafe fn push32_ss16(imm32: i32) -> OrPageFault<()> {
    let new_sp = read_reg16(SP) - 4 & 0xFFFF;
    safe_write32(get_seg_ss() + new_sp, imm32)?;
    write_reg16(SP, new_sp);
    Ok(())
}
#[no_mangle]
pub unsafe fn push32_ss32(imm32: i32) -> OrPageFault<()> {
    let new_esp = read_reg32(ESP) - 4;
    safe_write32(get_seg_ss() + new_esp, imm32)?;
    write_reg32(ESP, new_esp);
    Ok(())
}
#[no_mangle]
pub unsafe fn push32_ss16_mem(addr: i32) -> OrPageFault<()> { push32_ss16(safe_read32s(addr)?) }
#[no_mangle]
pub unsafe fn push32_ss32_mem(addr: i32) -> OrPageFault<()> { push32_ss32(safe_read32s(addr)?) }
#[no_mangle]
pub unsafe fn push32(imm32: i32) -> OrPageFault<()> {
    if *stack_size_32 { push32_ss32(imm32) } else { push32_ss16(imm32) }
}
#[no_mangle]
pub unsafe fn pop16() -> OrPageFault<i32> {
    if *stack_size_32 { pop16_ss32() } else { pop16_ss16() }
}
#[no_mangle]
pub unsafe fn pop16_ss16() -> OrPageFault<i32> {
    let sp = get_seg_ss() + read_reg16(SP);
    let result = safe_read16(sp)?;
    write_reg16(SP, read_reg16(SP) + 2);
    Ok(result)
}
#[no_mangle]
pub unsafe fn pop16_ss32() -> OrPageFault<i32> {
    let esp = get_seg_ss() + read_reg32(ESP);
    let result = safe_read16(esp)?;
    write_reg32(ESP, read_reg32(ESP) + 2);
    Ok(result)
}
#[no_mangle]
pub unsafe fn pop32s() -> OrPageFault<i32> {
    if *stack_size_32 { pop32s_ss32() } else { pop32s_ss16() }
}
#[no_mangle]
pub unsafe fn pop32s_ss16() -> OrPageFault<i32> {
    let sp = read_reg16(SP);
    let result = safe_read32s(get_seg_ss() + sp)?;
    write_reg16(SP, sp + 4);
    Ok(result)
}
#[no_mangle]
pub unsafe fn pop32s_ss32() -> OrPageFault<i32> {
    let esp = read_reg32(ESP);
    let result = safe_read32s(get_seg_ss() + esp)?;
    write_reg32(ESP, esp + 4);
    Ok(result)
}
#[no_mangle]
pub unsafe fn pusha16() {
    let temp = read_reg16(SP);
    // make sure we don't get a pagefault after having
    // pushed several registers already
    return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-16), 16));
    push16(read_reg16(AX)).unwrap();
    push16(read_reg16(CX)).unwrap();
    push16(read_reg16(DX)).unwrap();
    push16(read_reg16(BX)).unwrap();
    push16(temp as i32).unwrap();
    push16(read_reg16(BP)).unwrap();
    push16(read_reg16(SI)).unwrap();
    push16(read_reg16(DI)).unwrap();
}
#[no_mangle]
pub unsafe fn pusha32() {
    let temp = read_reg32(ESP);
    return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-32), 32));
    push32(read_reg32(EAX)).unwrap();
    push32(read_reg32(ECX)).unwrap();
    push32(read_reg32(EDX)).unwrap();
    push32(read_reg32(EBX)).unwrap();
    push32(temp).unwrap();
    push32(read_reg32(EBP)).unwrap();
    push32(read_reg32(ESI)).unwrap();
    push32(read_reg32(EDI)).unwrap();
}
pub unsafe fn lss16(addr: i32, reg: i32, seg: i32) {
    let new_reg = return_on_pagefault!(safe_read16(addr));
    let new_seg = return_on_pagefault!(safe_read16(addr + 2));
    if !switch_seg(seg, new_seg) {
        return;
    }
    write_reg16(reg, new_reg);
}
pub unsafe fn lss32(addr: i32, reg: i32, seg: i32) {
    let new_reg = return_on_pagefault!(safe_read32s(addr));
    let new_seg = return_on_pagefault!(safe_read16(addr + 4));
    if !switch_seg(seg, new_seg) {
        return;
    }
    write_reg32(reg, new_reg);
}
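// enter: allocate a stack frame of `size` bytes; for nesting_level > 0 the
// previous frame pointers are copied down before the new frame pointer is
// established (the level is taken modulo 32, as on real hardware).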
pub unsafe fn enter16(size: i32, mut nesting_level: i32) {
    nesting_level &= 31;
    if nesting_level > 0 {
        dbg_log!(
            "enter16 stack={} size={} nest={}",
            (if *stack_size_32 { 32 } else { 16 }),
            size,
            nesting_level,
        );
    }
    let ss_mask = if *stack_size_32 { -1 } else { 0xFFFF };
    let ss = get_seg_ss();
    let frame_temp = read_reg32(ESP) - 2;
    if nesting_level > 0 {
        let mut tmp_ebp = read_reg32(EBP);
        for _ in 1..nesting_level {
            tmp_ebp -= 2;
            push16(safe_read16(ss + (tmp_ebp & ss_mask)).unwrap()).unwrap();
        }
        push16(frame_temp).unwrap();
    }
    return_on_pagefault!(safe_write16(ss + (frame_temp & ss_mask), read_reg16(BP)));
    write_reg16(BP, frame_temp);
    adjust_stack_reg(-size - 2);
}
pub unsafe fn enter32(size: i32, mut nesting_level: i32) {
    nesting_level &= 31;
    if nesting_level > 0 {
        dbg_log!(
            "enter32 stack={} size={} nest={}",
            (if *stack_size_32 { 32 } else { 16 }),
            size,
            nesting_level,
        );
    }
    let ss_mask = if *stack_size_32 { -1 } else { 0xFFFF };
    let ss = get_seg_ss();
    let frame_temp = read_reg32(ESP) - 4;
    if nesting_level > 0 {
        let mut tmp_ebp = read_reg32(EBP);
        for _ in 1..nesting_level {
            tmp_ebp -= 4;
            push32(safe_read32s(ss + (tmp_ebp & ss_mask)).unwrap()).unwrap();
        }
        push32(frame_temp).unwrap();
    }
    return_on_pagefault!(safe_write32(ss + (frame_temp & ss_mask), read_reg32(EBP)));
    write_reg32(EBP, frame_temp);
    adjust_stack_reg(-size - 4);
}
#[no_mangle]
pub unsafe fn setcc_reg(condition: bool, r: i32) { write_reg8(r, condition as i32); }
#[no_mangle]
pub unsafe fn setcc_mem(condition: bool, addr: i32) {
    return_on_pagefault!(safe_write8(addr, condition as i32));
}
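// fxsave/fxrstor exchange FPU and SSE state with the 512-byte FXSAVE memory
// image; only the first 288 bytes are touched here (header, eight 80-bit
// x87 registers at 16-byte stride from offset 32, XMM0..XMM7 from offset 160).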
#[no_mangle]
pub unsafe fn fxsave(addr: i32) {
    dbg_assert!(addr & 0xF == 0, "TODO: #gp");
    return_on_pagefault!(writable_or_pagefault(addr, 288));
    safe_write16(addr + 0, (*fpu_control_word).into()).unwrap();
    safe_write16(addr + 2, fpu_load_status_word().into()).unwrap();
    safe_write8(addr + 4, !*fpu_stack_empty as i32 & 0xFF).unwrap();
    safe_write16(addr + 6, *fpu_opcode).unwrap();
    safe_write32(addr + 8, *fpu_ip).unwrap();
    safe_write16(addr + 12, *fpu_ip_selector).unwrap();
    safe_write32(addr + 16, *fpu_dp).unwrap();
    safe_write16(addr + 20, *fpu_dp_selector).unwrap();
    safe_write32(addr + 24, *mxcsr).unwrap();
    safe_write32(addr + 28, MXCSR_MASK).unwrap();
    for i in 0..8 {
        let reg_index = i + *fpu_stack_ptr as i32 & 7;
        fpu_store_m80(addr + 32 + (i << 4), *fpu_st.offset(reg_index as isize));
    }
    // If the OSFXSR bit in control register CR4 is not set, the FXSAVE
    // instruction may not save these registers. This behavior is
    // implementation dependent.
    for i in 0..8 {
        safe_write128(addr + 160 + (i << 4), *reg_xmm.offset(i as isize)).unwrap();
    }
}
#[no_mangle]
pub unsafe fn fxrstor(addr: i32) {
    dbg_assert!(addr & 0xF == 0, "TODO: #gp");
    return_on_pagefault!(readable_or_pagefault(addr, 288));
    let new_mxcsr = safe_read32s(addr + 24).unwrap();
    if 0 != new_mxcsr & !MXCSR_MASK {
        dbg_log!("#gp Invalid mxcsr bits");
        trigger_gp(0);
        return;
    }
    set_control_word(safe_read16(addr + 0).unwrap() as u16);
    fpu_set_status_word(safe_read16(addr + 2).unwrap() as u16);
    *fpu_stack_empty = !safe_read8(addr + 4).unwrap() as u8;
    *fpu_opcode = safe_read16(addr + 6).unwrap();
    *fpu_ip = safe_read32s(addr + 8).unwrap();
    *fpu_ip_selector = safe_read16(addr + 12).unwrap();
    *fpu_dp = safe_read32s(addr + 16).unwrap();
    *fpu_dp_selector = safe_read16(addr + 20).unwrap();
    set_mxcsr(new_mxcsr);
    for i in 0..8 {
        let reg_index = *fpu_stack_ptr as i32 + i & 7;
        *fpu_st.offset(reg_index as isize) = fpu_load_m80(addr + 32 + (i << 4)).unwrap();
    }
    for i in 0..8 {
        *reg_xmm.offset(i as isize) = safe_read128s(addr + 160 + (i << 4)).unwrap();
    }
}
#[no_mangle]
pub unsafe fn xchg8(data: i32, r8: i32) -> i32 {
    let tmp = read_reg8(r8);
    write_reg8(r8, data);
    return tmp;
}
#[no_mangle]
pub unsafe fn xchg16(data: i32, r16: i32) -> i32 {
    let tmp = read_reg16(r16);
    write_reg16(r16, data);
    return tmp;
}
#[no_mangle]
pub unsafe fn xchg16r(r16: i32) {
    let tmp = read_reg16(AX);
    write_reg16(AX, read_reg16(r16));
    write_reg16(r16, tmp);
}
#[no_mangle]
pub unsafe fn xchg32(data: i32, r32: i32) -> i32 {
    let tmp = read_reg32(r32);
    write_reg32(r32, data);
    return tmp;
}
#[no_mangle]
pub unsafe fn xchg32r(r32: i32) {
    let tmp = read_reg32(EAX);
    write_reg32(EAX, read_reg32(r32));
    write_reg32(r32, tmp);
}
#[no_mangle]
pub unsafe fn bswap(r: i32) { write_reg32(r, read_reg32(r).swap_bytes()) }