kvmxx.cc 4.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199
  1. #include "kvmxx.hh"
  2. #include "exception.hh"
  3. #include <fcntl.h>
  4. #include <sys/ioctl.h>
  5. #include <sys/mman.h>
  6. #include <stdlib.h>
  7. #include <memory>
  8. #include <algorithm>
  9. namespace kvm {
  10. static long check_error(long r)
  11. {
  12. if (r == -1) {
  13. throw errno_exception(errno);
  14. }
  15. return r;
  16. }
// Wrap an already-open file descriptor; takes ownership of it.
fd::fd(int fd)
    : _fd(fd)
{
}
// Copying duplicates the underlying descriptor with dup(), so each fd
// object owns an independent file descriptor.
fd::fd(const fd& other)
    : _fd(::dup(other._fd))
{
    // dup() returns -1 on failure; convert that into an errno_exception.
    check_error(_fd);
}
// Open a device node (e.g. "/dev/kvm") with the given open(2) flags,
// throwing errno_exception on failure.
fd::fd(std::string device_node, int flags)
    : _fd(::open(device_node.c_str(), flags))
{
    check_error(_fd);
}
  31. long fd::ioctl(unsigned nr, long arg)
  32. {
  33. return check_error(::ioctl(_fd, nr, arg));
  34. }
// Create a vcpu with the given id inside 'vm' and map its shared
// kvm_run communication area.
// NOTE(review): the init list uses _fd before _mmap_size; actual
// initialization order follows the member declaration order in
// kvmxx.hh — confirm it matches this textual order.
vcpu::vcpu(vm& vm, int id)
    : _vm(vm), _fd(vm._fd.ioctl(KVM_CREATE_VCPU, id)), _shared(NULL)
    , _mmap_size(_vm._system._fd.ioctl(KVM_GET_VCPU_MMAP_SIZE, 0))
{
    // The kernel exposes the kvm_run structure by mmap()ing the vcpu fd;
    // the required mapping length comes from KVM_GET_VCPU_MMAP_SIZE.
    kvm_run *shared = static_cast<kvm_run*>(::mmap(NULL, _mmap_size,
                                                   PROT_READ | PROT_WRITE,
                                                   MAP_SHARED,
                                                   _fd.get(), 0));
    if (shared == MAP_FAILED) {
        throw errno_exception(errno);
    }
    _shared = shared;
}
// Unmap the shared kvm_run area.  The munmap() result is deliberately
// ignored: destructors must not throw.
vcpu::~vcpu()
{
    munmap(_shared, _mmap_size);
}
// Enter guest execution; the ioctl returns when the kernel exits back
// to userspace (or throws via fd::ioctl on error).
void vcpu::run()
{
    _fd.ioctl(KVM_RUN, 0);
}
// Fetch the vcpu's general-purpose register state.
kvm_regs vcpu::regs()
{
    kvm_regs regs;  // filled in by the ioctl below
    _fd.ioctlp(KVM_GET_REGS, &regs);
    return regs;
}
// Load general-purpose registers into the vcpu.  The const_cast is
// needed because ioctlp presumably takes a non-const pointer;
// KVM_SET_REGS only reads the structure.
void vcpu::set_regs(const kvm_regs& regs)
{
    _fd.ioctlp(KVM_SET_REGS, const_cast<kvm_regs*>(&regs));
}
// Fetch the vcpu's special register state (segments, control regs, ...).
kvm_sregs vcpu::sregs()
{
    kvm_sregs sregs;  // filled in by the ioctl below
    _fd.ioctlp(KVM_GET_SREGS, &sregs);
    return sregs;
}
// Load special registers into the vcpu.  const_cast for the same
// reason as set_regs(): the ioctl only reads the structure.
void vcpu::set_sregs(const kvm_sregs& sregs)
{
    _fd.ioctlp(KVM_SET_SREGS, const_cast<kvm_sregs*>(&sregs));
}
  76. class vcpu::kvm_msrs_ptr {
  77. public:
  78. explicit kvm_msrs_ptr(size_t nmsrs);
  79. ~kvm_msrs_ptr() { ::free(_kvm_msrs); }
  80. kvm_msrs* operator->() { return _kvm_msrs; }
  81. kvm_msrs* get() { return _kvm_msrs; }
  82. private:
  83. kvm_msrs* _kvm_msrs;
  84. };
// Allocate space for the kvm_msrs header plus 'nmsrs' trailing entries.
// malloc() (rather than new) is used because kvm_msrs ends in a
// flexible array of kvm_msr_entry, so the entries must be co-allocated
// with the header.
vcpu::kvm_msrs_ptr::kvm_msrs_ptr(size_t nmsrs)
    : _kvm_msrs(0)
{
    size_t size = sizeof(kvm_msrs) + sizeof(kvm_msr_entry) * nmsrs;
    _kvm_msrs = static_cast<kvm_msrs*>(::malloc(size));
    if (!_kvm_msrs) {
        throw std::bad_alloc();
    }
}
  94. std::vector<kvm_msr_entry> vcpu::msrs(std::vector<uint32_t> indices)
  95. {
  96. kvm_msrs_ptr msrs(indices.size());
  97. msrs->nmsrs = indices.size();
  98. for (unsigned i = 0; i < msrs->nmsrs; ++i) {
  99. msrs->entries[i].index = indices[i];
  100. }
  101. _fd.ioctlp(KVM_GET_MSRS, msrs.get());
  102. return std::vector<kvm_msr_entry>(msrs->entries,
  103. msrs->entries + msrs->nmsrs);
  104. }
  105. void vcpu::set_msrs(const std::vector<kvm_msr_entry>& msrs)
  106. {
  107. kvm_msrs_ptr _msrs(msrs.size());
  108. _msrs->nmsrs = msrs.size();
  109. std::copy(msrs.begin(), msrs.end(), _msrs->entries);
  110. _fd.ioctlp(KVM_SET_MSRS, _msrs.get());
  111. }
  112. void vcpu::set_debug(uint64_t dr[8], bool enabled, bool singlestep)
  113. {
  114. kvm_guest_debug gd;
  115. gd.control = 0;
  116. if (enabled) {
  117. gd.control |= KVM_GUESTDBG_ENABLE;
  118. }
  119. if (singlestep) {
  120. gd.control |= KVM_GUESTDBG_SINGLESTEP;
  121. }
  122. for (int i = 0; i < 8; ++i) {
  123. gd.arch.debugreg[i] = dr[i];
  124. }
  125. _fd.ioctlp(KVM_SET_GUEST_DEBUG, &gd);
  126. }
// Create a new virtual machine on the given kvm system handle
// (KVM_CREATE_VM returns the vm's file descriptor).
vm::vm(system& system)
    : _system(system), _fd(system._fd.ioctl(KVM_CREATE_VM, 0))
{
}
  131. void vm::set_memory_region(int slot, void *addr, uint64_t gpa, size_t len,
  132. uint32_t flags)
  133. {
  134. struct kvm_userspace_memory_region umr;
  135. umr.slot = slot;
  136. umr.flags = flags;
  137. umr.guest_phys_addr = gpa;
  138. umr.memory_size = len;
  139. umr.userspace_addr = reinterpret_cast<uintptr_t>(addr);
  140. _fd.ioctlp(KVM_SET_USER_MEMORY_REGION, &umr);
  141. }
  142. void vm::get_dirty_log(int slot, void *log)
  143. {
  144. struct kvm_dirty_log kdl;
  145. kdl.slot = slot;
  146. kdl.dirty_bitmap = log;
  147. _fd.ioctlp(KVM_GET_DIRTY_LOG, &kdl);
  148. }
// Set the guest-physical address the kernel may use for the TSS
// (KVM_SET_TSS_ADDR); the address is passed directly as the ioctl arg.
void vm::set_tss_addr(uint32_t addr)
{
    _fd.ioctl(KVM_SET_TSS_ADDR, addr);
}
// Set the guest-physical address of the EPT identity map page
// (KVM_SET_IDENTITY_MAP_ADDR); this ioctl takes a pointer to the value.
void vm::set_ept_identity_map_addr(uint64_t addr)
{
    _fd.ioctlp(KVM_SET_IDENTITY_MAP_ADDR, &addr);
}
// Open the kvm system device node (default declared in the header,
// presumably "/dev/kvm") read-write; throws on failure via fd's ctor.
system::system(std::string device_node)
    : _fd(device_node, O_RDWR)
{
}
  161. bool system::check_extension(int extension)
  162. {
  163. return _fd.ioctl(KVM_CHECK_EXTENSION, extension);
  164. }
// Return the raw integer result of KVM_CHECK_EXTENSION — some
// capabilities report a count or limit rather than a boolean.
int system::get_extension_int(int extension)
{
    return _fd.ioctl(KVM_CHECK_EXTENSION, extension);
}
  169. };