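//! Small helpers for emitting WebAssembly bytecode into a byte buffer.
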
use leb::{write_fixed_leb16_at_idx, write_leb_i32, write_leb_u32};
use wasmgen::wasm_opcodes as op;
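
/// Emit an `i32.const` instruction with the given immediate.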
pub fn push_i32(buf: &mut Vec<u8>, v: i32) {
    buf.push(op::OP_I32CONST);
    write_leb_i32(buf, v);
}
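
/// Emit code loading the unsigned 16-bit value at the given constant,
/// 2-byte-aligned address.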
pub fn load_aligned_u16(buf: &mut Vec<u8>, addr: u32) {
    // An unaligned address doesn't cause a failure in the generated code,
    // but the access will be much slower
    dbg_assert!((addr & 1) == 0);
    buf.push(op::OP_I32CONST);
    write_leb_u32(buf, addr);
    buf.push(op::OP_I32LOAD16U);
    buf.push(op::MEM_ALIGN16);
    buf.push(0); // immediate offset
}
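
/// Emit code loading the 32-bit value at the given constant,
/// 4-byte-aligned address.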
pub fn load_aligned_i32(buf: &mut Vec<u8>, addr: u32) {
    // An unaligned address doesn't cause a failure in the generated code,
    // but the access will be much slower
    dbg_assert!((addr & 3) == 0);
    push_i32(buf, addr as i32);
    load_aligned_i32_from_stack(buf, 0);
}
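
/// Emit an aligned 16-bit store; expects `[address, value]` on the stack
/// (value on top).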
pub fn store_aligned_u16(buf: &mut Vec<u8>) {
    buf.push(op::OP_I32STORE16);
    buf.push(op::MEM_ALIGN16);
    buf.push(0); // immediate offset
}
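
/// Emit an aligned 32-bit store; also expects `[address, value]` on the stack.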
pub fn store_aligned_i32(buf: &mut Vec<u8>) {
    buf.push(op::OP_I32STORE);
    buf.push(op::MEM_ALIGN32);
    buf.push(0); // immediate offset
}

pub fn add_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32ADD); }
pub fn and_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32AND); }
pub fn or_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32OR); }
pub fn shl_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32SHL); }
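
/// Emit a call to the given function index. The index is written as a
/// fixed-width two-byte LEB128 (the two placeholder bytes are patched by
/// write_fixed_leb16_at_idx), so every call site has the same size.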
pub fn call_fn(buf: &mut Vec<u8>, fn_idx: u16) {
    buf.push(op::OP_CALL);
    let buf_len = buf.len();
    buf.push(0);
    buf.push(0);
    write_fixed_leb16_at_idx(buf, buf_len, fn_idx);
}

pub fn eq_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32EQ); }
pub fn ne_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32NE); }
pub fn le_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32LES); }
pub fn lt_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32LTS); }
pub fn ge_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32GES); }
pub fn gt_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32GTS); }

pub fn if_i32(buf: &mut Vec<u8>) {
    buf.push(op::OP_IF);
    buf.push(op::TYPE_I32);
}

pub fn block_i32(buf: &mut Vec<u8>) {
    buf.push(op::OP_BLOCK);
    buf.push(op::TYPE_I32);
}

pub fn xor_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32XOR); }
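
/// Emit a 32-bit load from the address on the stack plus the constant
/// `byte_offset`, with no alignment hint.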
pub fn load_unaligned_i32_from_stack(buf: &mut Vec<u8>, byte_offset: u32) {
    buf.push(op::OP_I32LOAD);
    buf.push(op::MEM_NO_ALIGN);
    write_leb_u32(buf, byte_offset);
}

pub fn load_aligned_i32_from_stack(buf: &mut Vec<u8>, byte_offset: u32) {
    buf.push(op::OP_I32LOAD);
    buf.push(op::MEM_ALIGN32);
    write_leb_u32(buf, byte_offset);
}

// XXX: Function naming should be consistent regarding both alignment and
// accepting an offset. Leaving as-is for the Rust port to clean up.
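/// Emit a 32-bit store to the address on the stack plus the constant
/// `byte_offset`, with no alignment hint; expects `[address, value]` on
/// the stack.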
pub fn store_unaligned_i32(buf: &mut Vec<u8>, byte_offset: u32) {
    buf.push(op::OP_I32STORE);
    buf.push(op::MEM_NO_ALIGN);
    write_leb_u32(buf, byte_offset);
}

pub fn shr_u32(buf: &mut Vec<u8>) { buf.push(op::OP_I32SHRU); }
pub fn shr_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32SHRS); }
pub fn eqz_i32(buf: &mut Vec<u8>) { buf.push(op::OP_I32EQZ); }

pub fn if_void(buf: &mut Vec<u8>) {
    buf.push(op::OP_IF);
    buf.push(op::TYPE_VOID_BLOCK);
}

pub fn else_(buf: &mut Vec<u8>) { buf.push(op::OP_ELSE); }

pub fn loop_void(buf: &mut Vec<u8>) {
    buf.push(op::OP_LOOP);
    buf.push(op::TYPE_VOID_BLOCK);
}

pub fn block_void(buf: &mut Vec<u8>) {
    buf.push(op::OP_BLOCK);
    buf.push(op::TYPE_VOID_BLOCK);
}

pub fn block_end(buf: &mut Vec<u8>) { buf.push(op::OP_END); }
pub fn return_(buf: &mut Vec<u8>) { buf.push(op::OP_RETURN); }
pub fn drop(buf: &mut Vec<u8>) { buf.push(op::OP_DROP); }

// Generate a br_table where an input of [i] will branch to the [i]th outer
// block, where [i] is passed on the wasm stack. Inputs of cases_count or
// above take the default target, the (cases_count)th outer block.
pub fn brtable_and_cases(buf: &mut Vec<u8>, cases_count: u32) {
    buf.push(op::OP_BRTABLE);
    write_leb_u32(buf, cases_count);
    for i in 0..(cases_count + 1) {
        write_leb_u32(buf, i);
    }
}
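
/// Emit an unconditional branch to the block `depth` levels out.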
pub fn br(buf: &mut Vec<u8>, depth: u32) {
    buf.push(op::OP_BR);
    write_leb_u32(buf, depth);
}

pub fn get_local(buf: &mut Vec<u8>, idx: u32) {
    buf.push(op::OP_GETLOCAL);
    write_leb_u32(buf, idx);
}

pub fn set_local(buf: &mut Vec<u8>, idx: u32) {
    buf.push(op::OP_SETLOCAL);
    write_leb_u32(buf, idx);
}

pub fn tee_local(buf: &mut Vec<u8>, idx: u32) {
    buf.push(op::OP_TEELOCAL);
    write_leb_u32(buf, idx);
}

pub fn unreachable(buf: &mut Vec<u8>) { buf.push(op::OP_UNREACHABLE); }
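
/// Emit code incrementing the aligned 32-bit value at `addr` by one.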
pub fn increment_mem32(buf: &mut Vec<u8>, addr: u32) { increment_variable(buf, addr, 1) }
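
/// Emit code adding `n` to the aligned 32-bit value at `addr`.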
pub fn increment_variable(buf: &mut Vec<u8>, addr: u32, n: i32) {
    push_i32(buf, addr as i32);
    load_aligned_i32(buf, addr);
    push_i32(buf, n);
    add_i32(buf);
    store_aligned_i32(buf);
}
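
/// Emit a zero-extending 16-bit load from the 2-byte-aligned address on the
/// stack, plus the constant `byte_offset`.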
pub fn load_aligned_u16_from_stack(buf: &mut Vec<u8>, byte_offset: u32) {
    buf.push(op::OP_I32LOAD16U);
    buf.push(op::MEM_ALIGN16);
    write_leb_u32(buf, byte_offset);
}
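
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal usage sketch (not part of the original module) showing how
    // the helpers compose: emit wasm for `if local0 != 0 { *0x100 += 1 }`.
    #[test]
    fn compose_if_and_increment() {
        let mut buf = Vec::new();
        get_local(&mut buf, 0); // condition: local 0 (nonzero means taken)
        if_void(&mut buf);
        increment_mem32(&mut buf, 0x100); // 0x100 is 4-byte aligned
        block_end(&mut buf); // close the `if`
        // get_local, if + block type, the increment sequence, then `end`
        assert!(buf.len() > 4);
    }
}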