identity.cc

#include "identity.hh"
#include "exception.hh"

#include <stdlib.h>
#include <stdio.h>

namespace identity {

typedef unsigned long ulong;
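
// A hole marks a guest-physical range that the identity map built below leaves
// unmapped.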
hole::hole()
    : address(), size()
{
}

hole::hole(void* address, size_t size)
    : address(address), size(size)
{
}
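
// Build an identity memory map: every guest-physical address maps to the same
// host-virtual address, except for the caller-supplied hole and the four pages
// allocated here for KVM's TSS area plus the EPT identity page (placed at
// tss_addr + 3 * 4096), which are left unmapped. The (uint32_t)-tss_end and
// (uint32_t)-hole_end slot sizes cover the range from tss_end / hole_end up to
// the 4 GB boundary.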
vm::vm(kvm::vm& vm, mem_map& mmap, hole h)
{
    int ret = posix_memalign(&tss, 4096, 4 * 4096);
    if (ret) {
        throw errno_exception(ret);
    }
    uint64_t hole_gpa = reinterpret_cast<uintptr_t>(h.address);
    char* hole_hva = static_cast<char*>(h.address);
    uint64_t tss_addr = reinterpret_cast<uintptr_t>(tss);
    uint64_t tss_end = tss_addr + 4 * 4096;
    uint64_t hole_end = hole_gpa + h.size;
    if (hole_gpa < tss_addr) {
        if (hole_gpa) {
            _slots.push_back(mem_slot_ptr(new mem_slot(mmap, 0, hole_gpa, NULL)));
        }
        _slots.push_back(mem_slot_ptr(new mem_slot(mmap, hole_end, tss_addr - hole_end,
                                                   hole_hva + h.size)));
        _slots.push_back(mem_slot_ptr(new mem_slot(mmap, tss_end, (uint32_t)-tss_end,
                                                   (char*)tss + 4 * 4096)));
    } else {
        _slots.push_back(mem_slot_ptr(new mem_slot(mmap, 0, tss_addr, NULL)));
        _slots.push_back(mem_slot_ptr(new mem_slot(mmap, tss_end, hole_gpa - tss_end,
                                                   (char*)tss + 4 * 4096)));
        _slots.push_back(mem_slot_ptr(new mem_slot(mmap, hole_end, (uint32_t)-hole_end,
                                                   hole_hva + h.size)));
    }
    vm.set_tss_addr(tss_addr);
    vm.set_ept_identity_map_addr(tss_addr + 3 * 4096);
}

vm::~vm()
{
    free(tss);
}
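
// Start the vcpu in flat 32-bit protected mode with paging disabled, reusing the
// host's own segment selectors so that guest virtual addresses match host
// virtual addresses. %gs.base is loaded from the word at %gs:0 (the TCB
// self-pointer under 32-bit glibc, so guest %gs accesses behave like the
// host's), and TR is pointed at the stack buffer as a placeholder busy TSS
// (type 11).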
void vcpu::setup_sregs()
{
    kvm_sregs sregs = { };
    kvm_segment dseg = { };
    dseg.base = 0; dseg.limit = -1U; dseg.type = 3; dseg.present = 1;
    dseg.dpl = 3; dseg.db = 1; dseg.s = 1; dseg.l = 0; dseg.g = 1;
    kvm_segment cseg = dseg;
    cseg.type = 11;
    sregs.cs = cseg; asm ("mov %%cs, %0" : "=rm"(sregs.cs.selector));
    sregs.ds = dseg; asm ("mov %%ds, %0" : "=rm"(sregs.ds.selector));
    sregs.es = dseg; asm ("mov %%es, %0" : "=rm"(sregs.es.selector));
    sregs.fs = dseg; asm ("mov %%fs, %0" : "=rm"(sregs.fs.selector));
    sregs.gs = dseg; asm ("mov %%gs, %0" : "=rm"(sregs.gs.selector));
    sregs.ss = dseg; asm ("mov %%ss, %0" : "=rm"(sregs.ss.selector));
    uint32_t gsbase;
    asm ("mov %%gs:0, %0" : "=r"(gsbase));
    sregs.gs.base = gsbase;
    sregs.tr.base = reinterpret_cast<uintptr_t>(&*_stack.begin());
    sregs.tr.type = 11;
    sregs.tr.s = 0;
    sregs.tr.present = 1;
    sregs.cr0 = 0x11; /* PE, ET, !PG */
    sregs.cr4 = 0;
    sregs.efer = 0;
    sregs.apic_base = 0xfee00000;
    _vcpu.set_sregs(sregs);
}
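
// Guest-side entry point: run the user-supplied function, then write to I/O
// port 0. The port access exits to the host (KVM_EXIT_IO), presumably the
// harness's signal that the guest function has returned.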
void vcpu::thunk(vcpu* zis)
{
    zis->_guest_func();
    asm volatile("outb %%al, %%dx" : : "a"(0), "d"(0));
}
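
// Initial register state: rflags = IF | IOPL 3, a 16-byte-aligned stack carved
// from _stack with `this` pushed as thunk's argument and 0 as a fake return
// address, and rip pointing at vcpu::thunk (reachable in the guest thanks to
// the identity mapping).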
void vcpu::setup_regs()
{
    kvm_regs regs = {};
    regs.rflags = 0x3202;
    regs.rsp = reinterpret_cast<ulong>(&*_stack.end());
    regs.rsp &= ~15UL;
    ulong* sp = reinterpret_cast<ulong *>(regs.rsp);
    *--sp = reinterpret_cast<ulong>((char*)this);
    *--sp = 0;
    regs.rsp = reinterpret_cast<ulong>(sp);
    regs.rip = reinterpret_cast<ulong>(&vcpu::thunk);
    printf("rip %llx\n", regs.rip);
    _vcpu.set_regs(regs);
}
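
// Bind a kvm::vcpu to a guest function: allocate a private stack sized by
// stack_size and set up segment and general-purpose registers so the vcpu
// begins execution in thunk().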
vcpu::vcpu(kvm::vcpu& vcpu, std::function<void ()> guest_func,
           unsigned long stack_size)
    : _vcpu(vcpu), _guest_func(guest_func), _stack(stack_size)
{
    setup_sregs();
    setup_regs();
}

}