// misc_instr.rs
  1. use cpu::cpu::*;
  2. use cpu::fpu::{
  3. fpu_load_m80, fpu_load_status_word, fpu_set_status_word, fpu_store_m80, set_control_word,
  4. };
  5. use cpu::global_pointers::*;
  6. use paging::OrPageFault;
// Lazy computation of the carry flag (CF).
//
// Arithmetic flags are computed lazily: after an add/sub the operands and
// result are stashed in `last_op1`/`last_result` and the corresponding bits
// of `flags_changed` are set; the flag value is only derived here on demand.
pub unsafe fn getcf() -> bool {
    if 0 != *flags_changed & 1 {
        // CF must be derived from the last arithmetic operation.
        let m = (2 << *last_op_size) - 1; // value mask for the operand width
        dbg_assert!((*last_op1 as u32) <= m);
        dbg_assert!((*last_result as u32) <= m);
        // All-ones iff the last operation was a subtraction (sub bit is bit 31).
        let sub_mask = *flags_changed >> 31;
        // sub: last_op1 < last_result (or last_op1 < last_op2) (or (result ^ ((result ^ b) & (b ^ a))))
        // add: last_result < last_op1 (or last_result < last_op2) (or a ^ ((a ^ b) & (b ^ result)))
        // XORing both sides with sub_mask flips the comparison for subtraction.
        return ((*last_result as i32 ^ sub_mask) as u32) < (*last_op1 ^ sub_mask) as u32;
    }
    else {
        // Flag is already materialized in `flags`.
        return 0 != *flags & 1;
    };
}
// Lazy computation of the parity flag (PF): parity of the low 8 result bits.
#[no_mangle]
pub unsafe fn getpf() -> bool {
    if 0 != *flags_changed & FLAG_PARITY {
        // inverted lookup table
        // Fold bits 7..4 onto 3..0, then index the 16-entry table 0x9669,
        // pre-shifted so the selected bit lands on the FLAG_PARITY position.
        return 0 != 0x9669 << 2 >> ((*last_result ^ *last_result >> 4) & 15) & FLAG_PARITY;
    }
    else {
        return 0 != *flags & FLAG_PARITY;
    };
}
// Lazy computation of the adjust/auxiliary flag (AF): carry out of bit 3.
pub unsafe fn getaf() -> bool {
    if 0 != *flags_changed & FLAG_ADJUST {
        let is_sub = *flags_changed & FLAG_SUB != 0;
        // Reconstruct the second operand from result and first operand;
        // the roles are reversed for subtraction, hence the sign flip.
        let last_op2 = (*last_result - *last_op1) * if is_sub { -1 } else { 1 };
        // AF is bit 4 of a ^ b ^ result (FLAG_ADJUST selects that bit).
        return 0 != (*last_op1 ^ last_op2 ^ *last_result) & FLAG_ADJUST;
    }
    else {
        return 0 != *flags & FLAG_ADJUST;
    };
}
// Lazy computation of the zero flag (ZF).
pub unsafe fn getzf() -> bool {
    if 0 != *flags_changed & FLAG_ZERO {
        // `!r & (r - 1)` has bit `last_op_size` set exactly when the
        // width-masked result is zero.
        return 0 != (!*last_result & *last_result - 1) >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_ZERO;
    };
}
// Lazy computation of the sign flag (SF): the top bit of the result
// (`last_op_size` is the sign-bit index for the operand width).
pub unsafe fn getsf() -> bool {
    if 0 != *flags_changed & FLAG_SIGN {
        return 0 != *last_result >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_SIGN;
    };
}
// Lazy computation of the overflow flag (OF): signed overflow of the last
// add/sub, i.e. carry into vs. out of the sign bit disagree.
pub unsafe fn getof() -> bool {
    if 0 != *flags_changed & FLAG_OVERFLOW {
        // 1 iff the last operation was a subtraction (sub bit is bit 31).
        let is_sub = (*flags_changed as u32) >> 31;
        // add: (a ^ result) & (b ^ result)
        // sub: (a ^ result) & (b ^ result ^ 1) (or (a ^ b) & (result ^ a))
        // Reconstructed b, with the low bit flipped for the sub variant.
        let b_xor_1_if_sub = (*last_result - *last_op1) - is_sub as i32;
        return 0
            != ((*last_op1 ^ *last_result) & (b_xor_1_if_sub ^ *last_result)) >> *last_op_size & 1;
    }
    else {
        return 0 != *flags & FLAG_OVERFLOW;
    };
}
  70. pub unsafe fn test_o() -> bool { return getof(); }
  71. pub unsafe fn test_b() -> bool { return getcf(); }
  72. pub unsafe fn test_z() -> bool { return getzf(); }
  73. pub unsafe fn test_s() -> bool { return getsf(); }
  74. #[no_mangle]
  75. pub unsafe fn test_p() -> bool { return getpf(); }
  76. pub unsafe fn test_be() -> bool { return getcf() || getzf(); }
  77. pub unsafe fn test_l() -> bool { return getsf() != getof(); }
  78. pub unsafe fn test_le() -> bool { return getzf() || getsf() != getof(); }
  79. pub unsafe fn test_no() -> bool { return !test_o(); }
  80. pub unsafe fn test_nb() -> bool { return !test_b(); }
  81. pub unsafe fn test_nz() -> bool { return !test_z(); }
  82. pub unsafe fn test_ns() -> bool { return !test_s(); }
  83. #[no_mangle]
  84. pub unsafe fn test_np() -> bool { return !test_p(); }
  85. pub unsafe fn test_nbe() -> bool { return !test_be(); }
  86. pub unsafe fn test_nl() -> bool { return !test_l(); }
  87. pub unsafe fn test_nle() -> bool { return !test_le(); }
  88. pub unsafe fn jmp_rel16(rel16: i32) {
  89. let cs_offset = get_seg_cs();
  90. // limit ip to 16 bit
  91. *instruction_pointer = cs_offset + (*instruction_pointer - cs_offset + rel16 & 0xFFFF);
  92. }
  93. pub unsafe fn jmpcc16(condition: bool, imm16: i32) {
  94. if condition {
  95. jmp_rel16(imm16);
  96. };
  97. }
  98. pub unsafe fn jmpcc32(condition: bool, imm32: i32) {
  99. if condition {
  100. *instruction_pointer += imm32
  101. };
  102. }
  103. pub unsafe fn loope16(imm8s: i32) { jmpcc16(0 != decr_ecx_asize(is_asize_32()) && getzf(), imm8s); }
  104. pub unsafe fn loopne16(imm8s: i32) {
  105. jmpcc16(0 != decr_ecx_asize(is_asize_32()) && !getzf(), imm8s);
  106. }
  107. pub unsafe fn loop16(imm8s: i32) { jmpcc16(0 != decr_ecx_asize(is_asize_32()), imm8s); }
  108. pub unsafe fn jcxz16(imm8s: i32) { jmpcc16(get_reg_asize(ECX) == 0, imm8s); }
  109. pub unsafe fn loope32(imm8s: i32) { jmpcc32(0 != decr_ecx_asize(is_asize_32()) && getzf(), imm8s); }
  110. pub unsafe fn loopne32(imm8s: i32) {
  111. jmpcc32(0 != decr_ecx_asize(is_asize_32()) && !getzf(), imm8s);
  112. }
  113. pub unsafe fn loop32(imm8s: i32) { jmpcc32(0 != decr_ecx_asize(is_asize_32()), imm8s); }
  114. pub unsafe fn jcxz32(imm8s: i32) { jmpcc32(get_reg_asize(ECX) == 0, imm8s); }
  115. pub unsafe fn cmovcc16(condition: bool, value: i32, r: i32) {
  116. if condition {
  117. write_reg16(r, value);
  118. };
  119. }
  120. pub unsafe fn cmovcc32(condition: bool, value: i32, r: i32) {
  121. if condition {
  122. write_reg32(r, value);
  123. };
  124. }
  125. pub unsafe fn get_stack_pointer(offset: i32) -> i32 {
  126. if *stack_size_32 {
  127. return get_seg_ss() + read_reg32(ESP) + offset;
  128. }
  129. else {
  130. return get_seg_ss() + (read_reg16(SP) + offset & 0xFFFF);
  131. };
  132. }
  133. pub unsafe fn adjust_stack_reg(adjustment: i32) {
  134. if *stack_size_32 {
  135. write_reg32(ESP, read_reg32(ESP) + adjustment);
  136. }
  137. else {
  138. write_reg16(SP, read_reg16(SP) + adjustment);
  139. };
  140. }
  141. pub unsafe fn push16_ss16(imm16: i32) -> OrPageFault<()> {
  142. let sp = get_seg_ss() + (read_reg16(SP) - 2 & 0xFFFF);
  143. safe_write16(sp, imm16)?;
  144. write_reg16(SP, read_reg16(SP) - 2);
  145. Ok(())
  146. }
  147. pub unsafe fn push16_ss32(imm16: i32) -> OrPageFault<()> {
  148. let sp = get_seg_ss() + read_reg32(ESP) - 2;
  149. safe_write16(sp, imm16)?;
  150. write_reg32(ESP, read_reg32(ESP) - 2);
  151. Ok(())
  152. }
  153. pub unsafe fn push16_ss16_mem(addr: i32) -> OrPageFault<()> { push16_ss16(safe_read16(addr)?) }
  154. pub unsafe fn push16_ss32_mem(addr: i32) -> OrPageFault<()> { push16_ss32(safe_read16(addr)?) }
  155. pub unsafe fn push16(imm16: i32) -> OrPageFault<()> {
  156. if *stack_size_32 { push16_ss32(imm16) } else { push16_ss16(imm16) }
  157. }
  158. pub unsafe fn push32_ss16(imm32: i32) -> OrPageFault<()> {
  159. let new_sp = read_reg16(SP) - 4 & 0xFFFF;
  160. safe_write32(get_seg_ss() + new_sp, imm32)?;
  161. write_reg16(SP, new_sp);
  162. Ok(())
  163. }
  164. pub unsafe fn push32_ss32(imm32: i32) -> OrPageFault<()> {
  165. let new_esp = read_reg32(ESP) - 4;
  166. safe_write32(get_seg_ss() + new_esp, imm32)?;
  167. write_reg32(ESP, new_esp);
  168. Ok(())
  169. }
  170. pub unsafe fn push32_ss16_mem(addr: i32) -> OrPageFault<()> { push32_ss16(safe_read32s(addr)?) }
  171. pub unsafe fn push32_ss32_mem(addr: i32) -> OrPageFault<()> { push32_ss32(safe_read32s(addr)?) }
  172. pub unsafe fn push32(imm32: i32) -> OrPageFault<()> {
  173. if *stack_size_32 { push32_ss32(imm32) } else { push32_ss16(imm32) }
  174. }
// PUSH sreg with 32-bit operand size. Quirk: only 16 bits are written to the
// stack slot, yet [E]SP is still decremented by the full 4 bytes — so the
// upper two bytes of the slot are left unmodified (presumably matching
// hardware behavior; the original comment below suggests as much).
pub unsafe fn push32_sreg(i: i32) -> OrPageFault<()> {
    // you can't make this up ...
    if *stack_size_32 {
        let new_esp = read_reg32(ESP) - 4;
        safe_write16(get_seg_ss() + new_esp, *sreg.offset(i as isize) as i32)?;
        write_reg32(ESP, new_esp);
    }
    else {
        let new_sp = read_reg16(SP) - 4 & 0xFFFF;
        safe_write16(get_seg_ss() + new_sp, *sreg.offset(i as isize) as i32)?;
        write_reg16(SP, new_sp);
    }
    Ok(())
}
  189. pub unsafe fn pop16() -> OrPageFault<i32> {
  190. if *stack_size_32 { pop16_ss32() } else { pop16_ss16() }
  191. }
  192. pub unsafe fn pop16_ss16() -> OrPageFault<i32> {
  193. let sp = get_seg_ss() + read_reg16(SP);
  194. let result = safe_read16(sp)?;
  195. write_reg16(SP, read_reg16(SP) + 2);
  196. Ok(result)
  197. }
  198. pub unsafe fn pop16_ss32() -> OrPageFault<i32> {
  199. let esp = get_seg_ss() + read_reg32(ESP);
  200. let result = safe_read16(esp)?;
  201. write_reg32(ESP, read_reg32(ESP) + 2);
  202. Ok(result)
  203. }
  204. pub unsafe fn pop32s() -> OrPageFault<i32> {
  205. if *stack_size_32 { pop32s_ss32() } else { pop32s_ss16() }
  206. }
  207. pub unsafe fn pop32s_ss16() -> OrPageFault<i32> {
  208. let sp = read_reg16(SP);
  209. let result = safe_read32s(get_seg_ss() + sp)?;
  210. write_reg16(SP, sp + 4);
  211. Ok(result)
  212. }
  213. pub unsafe fn pop32s_ss32() -> OrPageFault<i32> {
  214. let esp = read_reg32(ESP);
  215. let result = safe_read32s(get_seg_ss() + esp)?;
  216. write_reg32(ESP, read_reg32(ESP) + 4);
  217. Ok(result)
  218. }
// PUSHA, 16-bit: push AX, CX, DX, BX, original SP, BP, SI, DI.
pub unsafe fn pusha16() {
    // SP as it was before the first push; this original value is what gets
    // pushed in the fifth slot.
    let temp = read_reg16(SP);
    // make sure we don't get a pagefault after having
    // pushed several registers already
    return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-16), 16));
    // unwrap: the full 16-byte destination range was verified writable above.
    push16(read_reg16(AX)).unwrap();
    push16(read_reg16(CX)).unwrap();
    push16(read_reg16(DX)).unwrap();
    push16(read_reg16(BX)).unwrap();
    push16(temp as i32).unwrap();
    push16(read_reg16(BP)).unwrap();
    push16(read_reg16(SI)).unwrap();
    push16(read_reg16(DI)).unwrap();
}
// PUSHAD, 32-bit: push EAX, ECX, EDX, EBX, original ESP, EBP, ESI, EDI.
pub unsafe fn pusha32() {
    // ESP before the first push; pushed as the fifth value.
    let temp = read_reg32(ESP);
    // Verify the whole 32-byte range up front so no pagefault can occur
    // after some registers have already been pushed.
    return_on_pagefault!(writable_or_pagefault(get_stack_pointer(-32), 32));
    // unwrap: the full destination range was verified writable above.
    push32(read_reg32(EAX)).unwrap();
    push32(read_reg32(ECX)).unwrap();
    push32(read_reg32(EDX)).unwrap();
    push32(read_reg32(EBX)).unwrap();
    push32(temp).unwrap();
    push32(read_reg32(EBP)).unwrap();
    push32(read_reg32(ESI)).unwrap();
    push32(read_reg32(EDI)).unwrap();
}
  245. pub unsafe fn lss16(addr: i32, reg: i32, seg: i32) {
  246. let new_reg = return_on_pagefault!(safe_read16(addr));
  247. let new_seg = return_on_pagefault!(safe_read16(addr + 2));
  248. if !switch_seg(seg, new_seg) {
  249. return;
  250. }
  251. write_reg16(reg, new_reg);
  252. }
  253. pub unsafe fn lss32(addr: i32, reg: i32, seg: i32) {
  254. let new_reg = return_on_pagefault!(safe_read32s(addr));
  255. let new_seg = return_on_pagefault!(safe_read16(addr + 4));
  256. if !switch_seg(seg, new_seg) {
  257. return;
  258. }
  259. write_reg32(reg, new_reg);
  260. }
  261. pub unsafe fn enter16(size: i32, mut nesting_level: i32) {
  262. nesting_level &= 31;
  263. if nesting_level > 0 {
  264. dbg_log!(
  265. "enter16 stack={} size={} nest={}",
  266. (if *stack_size_32 { 16 } else { 32 }),
  267. size,
  268. nesting_level,
  269. );
  270. }
  271. let ss_mask = if *stack_size_32 { -1 } else { 0xFFFF };
  272. let ss = get_seg_ss();
  273. let frame_temp = read_reg32(ESP) - 2;
  274. if nesting_level > 0 {
  275. let mut tmp_ebp = read_reg32(EBP);
  276. for _ in 1..nesting_level {
  277. tmp_ebp -= 2;
  278. push16(safe_read16(ss + (tmp_ebp & ss_mask)).unwrap()).unwrap();
  279. }
  280. push16(frame_temp).unwrap();
  281. }
  282. return_on_pagefault!(safe_write16(ss + (frame_temp & ss_mask), read_reg16(BP)));
  283. write_reg16(BP, frame_temp);
  284. adjust_stack_reg(-size - 2);
  285. }
  286. pub unsafe fn enter32(size: i32, mut nesting_level: i32) {
  287. nesting_level &= 31;
  288. if nesting_level > 0 {
  289. dbg_log!(
  290. "enter32 stack={} size={} nest={}",
  291. (if *stack_size_32 { 16 } else { 32 }),
  292. size,
  293. nesting_level,
  294. );
  295. }
  296. let ss_mask = if *stack_size_32 { -1 } else { 0xFFFF };
  297. let ss = get_seg_ss();
  298. let frame_temp = read_reg32(ESP) - 4;
  299. if nesting_level > 0 {
  300. let mut tmp_ebp = read_reg32(EBP);
  301. for _ in 1..nesting_level {
  302. tmp_ebp -= 4;
  303. push32(safe_read32s(ss + (tmp_ebp & ss_mask)).unwrap()).unwrap();
  304. }
  305. push32(frame_temp).unwrap();
  306. }
  307. return_on_pagefault!(safe_write32(ss + (frame_temp & ss_mask), read_reg32(EBP)));
  308. write_reg32(EBP, frame_temp);
  309. adjust_stack_reg(-size - 4);
  310. }
  311. pub unsafe fn setcc_reg(condition: bool, r: i32) { write_reg8(r, condition as i32); }
  312. pub unsafe fn setcc_mem(condition: bool, addr: i32) {
  313. return_on_pagefault!(safe_write8(addr, condition as i32));
  314. }
// FXSAVE: store the x87/MMX/SSE state to 16-byte-aligned memory at `addr`
// (the first 288 bytes of the FXSAVE area are written here).
pub unsafe fn fxsave(addr: i32) {
    dbg_assert!(addr & 0xF == 0, "TODO: #gp");
    return_on_pagefault!(writable_or_pagefault(addr, 288));
    safe_write16(addr + 0, (*fpu_control_word).into()).unwrap();
    safe_write16(addr + 2, fpu_load_status_word().into()).unwrap();
    // Abridged tag word: one bit per register; negated because
    // `fpu_stack_empty` tracks empty registers, the saved format valid ones.
    safe_write8(addr + 4, !*fpu_stack_empty as i32 & 0xFF).unwrap();
    safe_write16(addr + 6, *fpu_opcode).unwrap();
    safe_write32(addr + 8, *fpu_ip).unwrap();
    safe_write16(addr + 12, *fpu_ip_selector).unwrap();
    safe_write32(addr + 16, *fpu_dp).unwrap();
    safe_write16(addr + 20, *fpu_dp_selector).unwrap();
    safe_write32(addr + 24, *mxcsr).unwrap();
    safe_write32(addr + 28, MXCSR_MASK).unwrap();
    // ST(0)..ST(7) in stack order, one 80-bit value per 16-byte slot.
    for i in 0..8 {
        let reg_index = i + *fpu_stack_ptr as i32 & 7;
        fpu_store_m80(addr + 32 + (i << 4), *fpu_st.offset(reg_index as isize));
    }
    // If the OSFXSR bit in control register CR4 is not set, the FXSAVE
    // instruction may not save these registers. This behavior is
    // implementation dependent.
    for i in 0..8 {
        safe_write128(addr + 160 + (i << 4), *reg_xmm.offset(i as isize)).unwrap();
    }
}
// FXRSTOR: reload the x87/MMX/SSE state written by `fxsave` from `addr`.
pub unsafe fn fxrstor(addr: i32) {
    dbg_assert!(addr & 0xF == 0, "TODO: #gp");
    return_on_pagefault!(readable_or_pagefault(addr, 288));
    // Validate MXCSR before touching any state, so a #GP leaves the FPU/SSE
    // state fully unmodified.
    let new_mxcsr = safe_read32s(addr + 24).unwrap();
    if 0 != new_mxcsr & !MXCSR_MASK {
        dbg_log!("#gp Invalid mxcsr bits");
        trigger_gp(0);
        return;
    }
    set_control_word(safe_read16(addr + 0).unwrap() as u16);
    fpu_set_status_word(safe_read16(addr + 2).unwrap() as u16);
    // Abridged tag word: invert back to the internal "empty" convention.
    *fpu_stack_empty = !safe_read8(addr + 4).unwrap() as u8;
    *fpu_opcode = safe_read16(addr + 6).unwrap();
    *fpu_ip = safe_read32s(addr + 8).unwrap();
    *fpu_ip_selector = safe_read16(addr + 12).unwrap();
    *fpu_dp = safe_read32s(addr + 16).unwrap();
    *fpu_dp_selector = safe_read16(addr + 20).unwrap();
    set_mxcsr(new_mxcsr);
    // ST(0)..ST(7) in stack order, one 80-bit value per 16-byte slot.
    for i in 0..8 {
        let reg_index = *fpu_stack_ptr as i32 + i & 7;
        *fpu_st.offset(reg_index as isize) = fpu_load_m80(addr + 32 + (i << 4)).unwrap();
    }
    for i in 0..8 {
        *reg_xmm.offset(i as isize) = safe_read128s(addr + 160 + (i << 4)).unwrap();
    }
}
  365. pub unsafe fn xchg8(data: i32, r8: i32) -> i32 {
  366. let tmp = read_reg8(r8);
  367. write_reg8(r8, data);
  368. return tmp;
  369. }
  370. pub unsafe fn xchg16(data: i32, r16: i32) -> i32 {
  371. let tmp = read_reg16(r16);
  372. write_reg16(r16, data);
  373. return tmp;
  374. }
  375. pub unsafe fn xchg16r(r16: i32) {
  376. let tmp = read_reg16(AX);
  377. write_reg16(AX, read_reg16(r16));
  378. write_reg16(r16, tmp);
  379. }
  380. pub unsafe fn xchg32(data: i32, r32: i32) -> i32 {
  381. let tmp = read_reg32(r32);
  382. write_reg32(r32, data);
  383. return tmp;
  384. }
  385. pub unsafe fn xchg32r(r32: i32) {
  386. let tmp = read_reg32(EAX);
  387. write_reg32(EAX, read_reg32(r32));
  388. write_reg32(r32, tmp);
  389. }
  390. pub unsafe fn bswap(r: i32) { write_reg32(r, read_reg32(r).swap_bytes()) }
// LAR: load access rights. On success, sets ZF and returns bits 8..31 of the
// descriptor's high dword masked to 0x00FFFF00; on failure, clears ZF and
// returns `original` so the destination register is left unchanged.
pub unsafe fn lar(selector: i32, original: i32) -> i32 {
    if false {
        dbg_log!("lar sel={:x}", selector);
    }
    // System descriptor types for which LAR must fail.
    const LAR_INVALID_TYPE: u32 =
        1 << 0 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 0xA | 1 << 0xD | 1 << 0xE | 1 << 0xF;
    let sel = SegmentSelector::of_u16(selector as u16);
    match lookup_segment_selector(sel) {
        Err(()) => {
            // pagefault
            return original;
        },
        Ok(Err(_)) => {
            // Null selector or selector outside the descriptor table.
            *flags_changed &= !FLAG_ZERO;
            *flags &= !FLAG_ZERO;
            dbg_log!("lar: invalid selector={:x}: null or invalid", selector);
            return original;
        },
        Ok(Ok((desc, _))) => {
            *flags_changed &= !FLAG_ZERO;
            // Privilege check: DPL must be >= both CPL and the selector's RPL;
            // skipped for conforming code segments.
            let dpl_bad = desc.dpl() < *cpl || desc.dpl() < sel.rpl();
            if if desc.is_system() {
                (LAR_INVALID_TYPE >> desc.system_type() & 1 == 1) || dpl_bad
            }
            else {
                !desc.is_conforming_executable() && dpl_bad
            } {
                dbg_log!(
                    "lar: invalid selector={:x} is_null={} is_system={}",
                    selector,
                    false,
                    desc.is_system()
                );
                *flags &= !FLAG_ZERO;
                return original;
            }
            else {
                *flags |= FLAG_ZERO;
                return (desc.raw >> 32) as i32 & 0x00FFFF00;
            }
        },
    }
}
// LSL: load segment limit. On success, sets ZF and returns the descriptor's
// effective limit; on failure, clears ZF and returns `original` so the
// destination register is left unchanged.
pub unsafe fn lsl(selector: i32, original: i32) -> i32 {
    if false {
        dbg_log!("lsl sel={:x}", selector);
    }
    // System descriptor types for which LSL must fail.
    const LSL_INVALID_TYPE: i32 = 1 << 0
        | 1 << 4
        | 1 << 5
        | 1 << 6
        | 1 << 7
        | 1 << 8
        | 1 << 0xA
        | 1 << 0xC
        | 1 << 0xD
        | 1 << 0xE
        | 1 << 0xF;
    let sel = SegmentSelector::of_u16(selector as u16);
    match lookup_segment_selector(sel) {
        Err(()) => {
            // pagefault
            return original;
        },
        Ok(Err(_)) => {
            // Null selector or selector outside the descriptor table.
            *flags_changed &= !FLAG_ZERO;
            *flags &= !FLAG_ZERO;
            dbg_log!("lsl: invalid selector={:x}: null or invalid", selector);
            return original;
        },
        Ok(Ok((desc, _))) => {
            *flags_changed &= !FLAG_ZERO;
            // Privilege check: DPL must be >= both CPL and the selector's RPL;
            // skipped for conforming code segments.
            let dpl_bad = desc.dpl() < *cpl || desc.dpl() < sel.rpl();
            if if desc.is_system() {
                (LSL_INVALID_TYPE >> desc.system_type() & 1 == 1) || dpl_bad
            }
            else {
                !desc.is_conforming_executable() && dpl_bad
            } {
                dbg_log!(
                    "lsl: invalid selector={:x} is_null={} is_system={}",
                    selector,
                    false,
                    desc.is_system(),
                );
                *flags &= !FLAG_ZERO;
                return original;
            }
            else {
                *flags |= FLAG_ZERO;
                return desc.effective_limit() as i32;
            }
        },
    }
}
// VERR: set ZF if the segment described by `selector` is readable at the
// current privilege level, clear ZF otherwise.
pub unsafe fn verr(selector: i32) {
    *flags_changed &= !FLAG_ZERO;
    let sel = SegmentSelector::of_u16(selector as u16);
    match return_on_pagefault!(lookup_segment_selector(sel)) {
        Err(_) => {
            // Null selector or selector outside the descriptor table.
            *flags &= !FLAG_ZERO;
            dbg_log!("verr -> invalid. selector={:x}", selector);
        },
        Ok((desc, _)) => {
            // Must be a readable, non-system segment; the DPL >= CPL/RPL check
            // is skipped for conforming code segments.
            if desc.is_system()
                || !desc.is_readable()
                || (!desc.is_conforming_executable()
                    && (desc.dpl() < *cpl || desc.dpl() < sel.rpl()))
            {
                dbg_log!("verr -> invalid. selector={:x}", selector);
                *flags &= !FLAG_ZERO;
            }
            else {
                dbg_log!("verr -> valid. selector={:x}", selector);
                *flags |= FLAG_ZERO;
            }
        },
    }
}
// VERW: set ZF if the segment described by `selector` is writable at the
// current privilege level, clear ZF otherwise.
pub unsafe fn verw(selector: i32) {
    *flags_changed &= !FLAG_ZERO;
    let sel = SegmentSelector::of_u16(selector as u16);
    match return_on_pagefault!(lookup_segment_selector(sel)) {
        Err(_) => {
            // Null selector or selector outside the descriptor table.
            *flags &= !FLAG_ZERO;
            dbg_log!("verw -> invalid. selector={:x}", selector);
        },
        Ok((desc, _)) => {
            // Must be a writable, non-system segment with DPL >= CPL and RPL
            // (no conforming special case here, unlike VERR).
            if desc.is_system()
                || !desc.is_writable()
                || desc.dpl() < *cpl
                || desc.dpl() < sel.rpl()
            {
                dbg_log!(
                    "verw invalid selector={:x} is_system={} is_writable={}",
                    selector,
                    desc.is_system(),
                    desc.is_writable(),
                );
                *flags &= !FLAG_ZERO;
            }
            else {
                *flags |= FLAG_ZERO;
            }
        },
    }
}