/*
 * vmx.h - definitions and helpers for VMX (nested virtualization) tests.
 */
  1. #ifndef __VMX_H
  2. #define __VMX_H
  3. #include "libcflat.h"
  4. #include "processor.h"
  5. #include "bitops.h"
  6. #include "asm/page.h"
  7. #include "asm/io.h"
  8. struct vmcs {
  9. u32 revision_id; /* vmcs revision identifier */
  10. u32 abort; /* VMX-abort indicator */
  11. /* VMCS data */
  12. char data[0];
  13. };
  14. struct invvpid_operand {
  15. u64 vpid;
  16. u64 gla;
  17. };
  18. struct regs {
  19. u64 rax;
  20. u64 rcx;
  21. u64 rdx;
  22. u64 rbx;
  23. u64 cr2;
  24. u64 rbp;
  25. u64 rsi;
  26. u64 rdi;
  27. u64 r8;
  28. u64 r9;
  29. u64 r10;
  30. u64 r11;
  31. u64 r12;
  32. u64 r13;
  33. u64 r14;
  34. u64 r15;
  35. u64 rflags;
  36. };
  37. struct vmentry_failure {
  38. /* Did a vmlaunch or vmresume fail? */
  39. bool vmlaunch;
  40. /* Instruction mnemonic (for convenience). */
  41. const char *instr;
  42. /* Did the instruction return right away, or did we jump to HOST_RIP? */
  43. bool early;
  44. /* Contents of [re]flags after failed entry. */
  45. unsigned long flags;
  46. };
  47. struct vmx_test {
  48. const char *name;
  49. int (*init)(struct vmcs *vmcs);
  50. void (*guest_main)();
  51. int (*exit_handler)();
  52. void (*syscall_handler)(u64 syscall_no);
  53. struct regs guest_regs;
  54. int (*entry_failure_handler)(struct vmentry_failure *failure);
  55. struct vmcs *vmcs;
  56. int exits;
  57. /* Alternative test interface. */
  58. void (*v2)(void);
  59. };
  60. union vmx_basic {
  61. u64 val;
  62. struct {
  63. u32 revision;
  64. u32 size:13,
  65. reserved1: 3,
  66. width:1,
  67. dual:1,
  68. type:4,
  69. insouts:1,
  70. ctrl:1,
  71. reserved2:8;
  72. };
  73. };
  74. union vmx_ctrl_msr {
  75. u64 val;
  76. struct {
  77. u32 set, clr;
  78. };
  79. };
  80. union vmx_ept_vpid {
  81. u64 val;
  82. struct {
  83. u32:16,
  84. super:2,
  85. : 2,
  86. invept:1,
  87. : 11;
  88. u32 invvpid:1;
  89. };
  90. };
  91. enum Encoding {
  92. /* 16-Bit Control Fields */
  93. VPID = 0x0000ul,
  94. /* Posted-interrupt notification vector */
  95. PINV = 0x0002ul,
  96. /* EPTP index */
  97. EPTP_IDX = 0x0004ul,
  98. /* 16-Bit Guest State Fields */
  99. GUEST_SEL_ES = 0x0800ul,
  100. GUEST_SEL_CS = 0x0802ul,
  101. GUEST_SEL_SS = 0x0804ul,
  102. GUEST_SEL_DS = 0x0806ul,
  103. GUEST_SEL_FS = 0x0808ul,
  104. GUEST_SEL_GS = 0x080aul,
  105. GUEST_SEL_LDTR = 0x080cul,
  106. GUEST_SEL_TR = 0x080eul,
  107. GUEST_INT_STATUS = 0x0810ul,
  108. GUEST_PML_INDEX = 0x0812ul,
  109. /* 16-Bit Host State Fields */
  110. HOST_SEL_ES = 0x0c00ul,
  111. HOST_SEL_CS = 0x0c02ul,
  112. HOST_SEL_SS = 0x0c04ul,
  113. HOST_SEL_DS = 0x0c06ul,
  114. HOST_SEL_FS = 0x0c08ul,
  115. HOST_SEL_GS = 0x0c0aul,
  116. HOST_SEL_TR = 0x0c0cul,
  117. /* 64-Bit Control Fields */
  118. IO_BITMAP_A = 0x2000ul,
  119. IO_BITMAP_B = 0x2002ul,
  120. MSR_BITMAP = 0x2004ul,
  121. EXIT_MSR_ST_ADDR = 0x2006ul,
  122. EXIT_MSR_LD_ADDR = 0x2008ul,
  123. ENTER_MSR_LD_ADDR = 0x200aul,
  124. VMCS_EXEC_PTR = 0x200cul,
  125. TSC_OFFSET = 0x2010ul,
  126. TSC_OFFSET_HI = 0x2011ul,
  127. APIC_VIRT_ADDR = 0x2012ul,
  128. APIC_ACCS_ADDR = 0x2014ul,
  129. EPTP = 0x201aul,
  130. EPTP_HI = 0x201bul,
  131. PMLADDR = 0x200eul,
  132. PMLADDR_HI = 0x200ful,
  133. /* 64-Bit Readonly Data Field */
  134. INFO_PHYS_ADDR = 0x2400ul,
  135. /* 64-Bit Guest State */
  136. VMCS_LINK_PTR = 0x2800ul,
  137. VMCS_LINK_PTR_HI = 0x2801ul,
  138. GUEST_DEBUGCTL = 0x2802ul,
  139. GUEST_DEBUGCTL_HI = 0x2803ul,
  140. GUEST_EFER = 0x2806ul,
  141. GUEST_PAT = 0x2804ul,
  142. GUEST_PERF_GLOBAL_CTRL = 0x2808ul,
  143. GUEST_PDPTE = 0x280aul,
  144. /* 64-Bit Host State */
  145. HOST_PAT = 0x2c00ul,
  146. HOST_EFER = 0x2c02ul,
  147. HOST_PERF_GLOBAL_CTRL = 0x2c04ul,
  148. /* 32-Bit Control Fields */
  149. PIN_CONTROLS = 0x4000ul,
  150. CPU_EXEC_CTRL0 = 0x4002ul,
  151. EXC_BITMAP = 0x4004ul,
  152. PF_ERROR_MASK = 0x4006ul,
  153. PF_ERROR_MATCH = 0x4008ul,
  154. CR3_TARGET_COUNT = 0x400aul,
  155. EXI_CONTROLS = 0x400cul,
  156. EXI_MSR_ST_CNT = 0x400eul,
  157. EXI_MSR_LD_CNT = 0x4010ul,
  158. ENT_CONTROLS = 0x4012ul,
  159. ENT_MSR_LD_CNT = 0x4014ul,
  160. ENT_INTR_INFO = 0x4016ul,
  161. ENT_INTR_ERROR = 0x4018ul,
  162. ENT_INST_LEN = 0x401aul,
  163. TPR_THRESHOLD = 0x401cul,
  164. CPU_EXEC_CTRL1 = 0x401eul,
  165. /* 32-Bit R/O Data Fields */
  166. VMX_INST_ERROR = 0x4400ul,
  167. EXI_REASON = 0x4402ul,
  168. EXI_INTR_INFO = 0x4404ul,
  169. EXI_INTR_ERROR = 0x4406ul,
  170. IDT_VECT_INFO = 0x4408ul,
  171. IDT_VECT_ERROR = 0x440aul,
  172. EXI_INST_LEN = 0x440cul,
  173. EXI_INST_INFO = 0x440eul,
  174. /* 32-Bit Guest State Fields */
  175. GUEST_LIMIT_ES = 0x4800ul,
  176. GUEST_LIMIT_CS = 0x4802ul,
  177. GUEST_LIMIT_SS = 0x4804ul,
  178. GUEST_LIMIT_DS = 0x4806ul,
  179. GUEST_LIMIT_FS = 0x4808ul,
  180. GUEST_LIMIT_GS = 0x480aul,
  181. GUEST_LIMIT_LDTR = 0x480cul,
  182. GUEST_LIMIT_TR = 0x480eul,
  183. GUEST_LIMIT_GDTR = 0x4810ul,
  184. GUEST_LIMIT_IDTR = 0x4812ul,
  185. GUEST_AR_ES = 0x4814ul,
  186. GUEST_AR_CS = 0x4816ul,
  187. GUEST_AR_SS = 0x4818ul,
  188. GUEST_AR_DS = 0x481aul,
  189. GUEST_AR_FS = 0x481cul,
  190. GUEST_AR_GS = 0x481eul,
  191. GUEST_AR_LDTR = 0x4820ul,
  192. GUEST_AR_TR = 0x4822ul,
  193. GUEST_INTR_STATE = 0x4824ul,
  194. GUEST_ACTV_STATE = 0x4826ul,
  195. GUEST_SMBASE = 0x4828ul,
  196. GUEST_SYSENTER_CS = 0x482aul,
  197. PREEMPT_TIMER_VALUE = 0x482eul,
  198. /* 32-Bit Host State Fields */
  199. HOST_SYSENTER_CS = 0x4c00ul,
  200. /* Natural-Width Control Fields */
  201. CR0_MASK = 0x6000ul,
  202. CR4_MASK = 0x6002ul,
  203. CR0_READ_SHADOW = 0x6004ul,
  204. CR4_READ_SHADOW = 0x6006ul,
  205. CR3_TARGET_0 = 0x6008ul,
  206. CR3_TARGET_1 = 0x600aul,
  207. CR3_TARGET_2 = 0x600cul,
  208. CR3_TARGET_3 = 0x600eul,
  209. /* Natural-Width R/O Data Fields */
  210. EXI_QUALIFICATION = 0x6400ul,
  211. IO_RCX = 0x6402ul,
  212. IO_RSI = 0x6404ul,
  213. IO_RDI = 0x6406ul,
  214. IO_RIP = 0x6408ul,
  215. GUEST_LINEAR_ADDRESS = 0x640aul,
  216. /* Natural-Width Guest State Fields */
  217. GUEST_CR0 = 0x6800ul,
  218. GUEST_CR3 = 0x6802ul,
  219. GUEST_CR4 = 0x6804ul,
  220. GUEST_BASE_ES = 0x6806ul,
  221. GUEST_BASE_CS = 0x6808ul,
  222. GUEST_BASE_SS = 0x680aul,
  223. GUEST_BASE_DS = 0x680cul,
  224. GUEST_BASE_FS = 0x680eul,
  225. GUEST_BASE_GS = 0x6810ul,
  226. GUEST_BASE_LDTR = 0x6812ul,
  227. GUEST_BASE_TR = 0x6814ul,
  228. GUEST_BASE_GDTR = 0x6816ul,
  229. GUEST_BASE_IDTR = 0x6818ul,
  230. GUEST_DR7 = 0x681aul,
  231. GUEST_RSP = 0x681cul,
  232. GUEST_RIP = 0x681eul,
  233. GUEST_RFLAGS = 0x6820ul,
  234. GUEST_PENDING_DEBUG = 0x6822ul,
  235. GUEST_SYSENTER_ESP = 0x6824ul,
  236. GUEST_SYSENTER_EIP = 0x6826ul,
  237. /* Natural-Width Host State Fields */
  238. HOST_CR0 = 0x6c00ul,
  239. HOST_CR3 = 0x6c02ul,
  240. HOST_CR4 = 0x6c04ul,
  241. HOST_BASE_FS = 0x6c06ul,
  242. HOST_BASE_GS = 0x6c08ul,
  243. HOST_BASE_TR = 0x6c0aul,
  244. HOST_BASE_GDTR = 0x6c0cul,
  245. HOST_BASE_IDTR = 0x6c0eul,
  246. HOST_SYSENTER_ESP = 0x6c10ul,
  247. HOST_SYSENTER_EIP = 0x6c12ul,
  248. HOST_RSP = 0x6c14ul,
  249. HOST_RIP = 0x6c16ul
  250. };
  251. #define VMX_ENTRY_FAILURE (1ul << 31)
  252. #define VMX_ENTRY_FLAGS (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
  253. X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)
  254. enum Reason {
  255. VMX_EXC_NMI = 0,
  256. VMX_EXTINT = 1,
  257. VMX_TRIPLE_FAULT = 2,
  258. VMX_INIT = 3,
  259. VMX_SIPI = 4,
  260. VMX_SMI_IO = 5,
  261. VMX_SMI_OTHER = 6,
  262. VMX_INTR_WINDOW = 7,
  263. VMX_NMI_WINDOW = 8,
  264. VMX_TASK_SWITCH = 9,
  265. VMX_CPUID = 10,
  266. VMX_GETSEC = 11,
  267. VMX_HLT = 12,
  268. VMX_INVD = 13,
  269. VMX_INVLPG = 14,
  270. VMX_RDPMC = 15,
  271. VMX_RDTSC = 16,
  272. VMX_RSM = 17,
  273. VMX_VMCALL = 18,
  274. VMX_VMCLEAR = 19,
  275. VMX_VMLAUNCH = 20,
  276. VMX_VMPTRLD = 21,
  277. VMX_VMPTRST = 22,
  278. VMX_VMREAD = 23,
  279. VMX_VMRESUME = 24,
  280. VMX_VMWRITE = 25,
  281. VMX_VMXOFF = 26,
  282. VMX_VMXON = 27,
  283. VMX_CR = 28,
  284. VMX_DR = 29,
  285. VMX_IO = 30,
  286. VMX_RDMSR = 31,
  287. VMX_WRMSR = 32,
  288. VMX_FAIL_STATE = 33,
  289. VMX_FAIL_MSR = 34,
  290. VMX_MWAIT = 36,
  291. VMX_MTF = 37,
  292. VMX_MONITOR = 39,
  293. VMX_PAUSE = 40,
  294. VMX_FAIL_MCHECK = 41,
  295. VMX_TPR_THRESHOLD = 43,
  296. VMX_APIC_ACCESS = 44,
  297. VMX_GDTR_IDTR = 46,
  298. VMX_LDTR_TR = 47,
  299. VMX_EPT_VIOLATION = 48,
  300. VMX_EPT_MISCONFIG = 49,
  301. VMX_INVEPT = 50,
  302. VMX_PREEMPT = 52,
  303. VMX_INVVPID = 53,
  304. VMX_WBINVD = 54,
  305. VMX_XSETBV = 55,
  306. VMX_APIC_WRITE = 56,
  307. VMX_RDRAND = 57,
  308. VMX_INVPCID = 58,
  309. VMX_VMFUNC = 59,
  310. VMX_RDSEED = 61,
  311. VMX_PML_FULL = 62,
  312. VMX_XSAVES = 63,
  313. VMX_XRSTORS = 64,
  314. };
  315. enum Ctrl_exi {
  316. EXI_SAVE_DBGCTLS = 1UL << 2,
  317. EXI_HOST_64 = 1UL << 9,
  318. EXI_LOAD_PERF = 1UL << 12,
  319. EXI_INTA = 1UL << 15,
  320. EXI_SAVE_PAT = 1UL << 18,
  321. EXI_LOAD_PAT = 1UL << 19,
  322. EXI_SAVE_EFER = 1UL << 20,
  323. EXI_LOAD_EFER = 1UL << 21,
  324. EXI_SAVE_PREEMPT = 1UL << 22,
  325. };
  326. enum Ctrl_ent {
  327. ENT_LOAD_DBGCTLS = 1UL << 2,
  328. ENT_GUEST_64 = 1UL << 9,
  329. ENT_LOAD_PAT = 1UL << 14,
  330. ENT_LOAD_EFER = 1UL << 15,
  331. };
  332. enum Ctrl_pin {
  333. PIN_EXTINT = 1ul << 0,
  334. PIN_NMI = 1ul << 3,
  335. PIN_VIRT_NMI = 1ul << 5,
  336. PIN_PREEMPT = 1ul << 6,
  337. };
  338. enum Ctrl0 {
  339. CPU_INTR_WINDOW = 1ul << 2,
  340. CPU_HLT = 1ul << 7,
  341. CPU_INVLPG = 1ul << 9,
  342. CPU_MWAIT = 1ul << 10,
  343. CPU_RDPMC = 1ul << 11,
  344. CPU_RDTSC = 1ul << 12,
  345. CPU_CR3_LOAD = 1ul << 15,
  346. CPU_CR3_STORE = 1ul << 16,
  347. CPU_CR8_LOAD = 1ul << 19,
  348. CPU_CR8_STORE = 1ul << 20,
  349. CPU_TPR_SHADOW = 1ul << 21,
  350. CPU_NMI_WINDOW = 1ul << 22,
  351. CPU_IO = 1ul << 24,
  352. CPU_IO_BITMAP = 1ul << 25,
  353. CPU_MSR_BITMAP = 1ul << 28,
  354. CPU_MONITOR = 1ul << 29,
  355. CPU_PAUSE = 1ul << 30,
  356. CPU_SECONDARY = 1ul << 31,
  357. };
  358. enum Ctrl1 {
  359. CPU_EPT = 1ul << 1,
  360. CPU_DESC_TABLE = 1ul << 2,
  361. CPU_RDTSCP = 1ul << 3,
  362. CPU_VPID = 1ul << 5,
  363. CPU_URG = 1ul << 7,
  364. CPU_WBINVD = 1ul << 6,
  365. CPU_RDRAND = 1ul << 11,
  366. CPU_PML = 1ul << 17,
  367. };
  368. enum Intr_type {
  369. VMX_INTR_TYPE_EXT_INTR = 0,
  370. VMX_INTR_TYPE_NMI_INTR = 2,
  371. VMX_INTR_TYPE_HARD_EXCEPTION = 3,
  372. VMX_INTR_TYPE_SOFT_INTR = 4,
  373. VMX_INTR_TYPE_SOFT_EXCEPTION = 6,
  374. };
  375. /*
  376. * Interruption-information format
  377. */
  378. #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
  379. #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
  380. #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
  381. #define INTR_INFO_UNBLOCK_NMI_MASK 0x1000 /* 12 */
  382. #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
  383. #define INTR_INFO_INTR_TYPE_SHIFT 8
  384. /*
  385. * VM-instruction error numbers
  386. */
  387. enum vm_instruction_error_number {
  388. VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
  389. VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
  390. VMXERR_VMCLEAR_VMXON_POINTER = 3,
  391. VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
  392. VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
  393. VMXERR_VMRESUME_AFTER_VMXOFF = 6,
  394. VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
  395. VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
  396. VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
  397. VMXERR_VMPTRLD_VMXON_POINTER = 10,
  398. VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
  399. VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
  400. VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
  401. VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
  402. VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
  403. VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
  404. VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
  405. VMXERR_VMCALL_NONCLEAR_VMCS = 19,
  406. VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
  407. VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
  408. VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
  409. VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
  410. VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
  411. VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
  412. VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
  413. };
  414. #define SAVE_GPR \
  415. "xchg %rax, regs\n\t" \
  416. "xchg %rbx, regs+0x8\n\t" \
  417. "xchg %rcx, regs+0x10\n\t" \
  418. "xchg %rdx, regs+0x18\n\t" \
  419. "xchg %rbp, regs+0x28\n\t" \
  420. "xchg %rsi, regs+0x30\n\t" \
  421. "xchg %rdi, regs+0x38\n\t" \
  422. "xchg %r8, regs+0x40\n\t" \
  423. "xchg %r9, regs+0x48\n\t" \
  424. "xchg %r10, regs+0x50\n\t" \
  425. "xchg %r11, regs+0x58\n\t" \
  426. "xchg %r12, regs+0x60\n\t" \
  427. "xchg %r13, regs+0x68\n\t" \
  428. "xchg %r14, regs+0x70\n\t" \
  429. "xchg %r15, regs+0x78\n\t"
  430. #define LOAD_GPR SAVE_GPR
  431. #define SAVE_GPR_C \
  432. "xchg %%rax, regs\n\t" \
  433. "xchg %%rbx, regs+0x8\n\t" \
  434. "xchg %%rcx, regs+0x10\n\t" \
  435. "xchg %%rdx, regs+0x18\n\t" \
  436. "xchg %%rbp, regs+0x28\n\t" \
  437. "xchg %%rsi, regs+0x30\n\t" \
  438. "xchg %%rdi, regs+0x38\n\t" \
  439. "xchg %%r8, regs+0x40\n\t" \
  440. "xchg %%r9, regs+0x48\n\t" \
  441. "xchg %%r10, regs+0x50\n\t" \
  442. "xchg %%r11, regs+0x58\n\t" \
  443. "xchg %%r12, regs+0x60\n\t" \
  444. "xchg %%r13, regs+0x68\n\t" \
  445. "xchg %%r14, regs+0x70\n\t" \
  446. "xchg %%r15, regs+0x78\n\t"
  447. #define LOAD_GPR_C SAVE_GPR_C
  448. #define VMX_IO_SIZE_MASK 0x7
  449. #define _VMX_IO_BYTE 0
  450. #define _VMX_IO_WORD 1
  451. #define _VMX_IO_LONG 3
  452. #define VMX_IO_DIRECTION_MASK (1ul << 3)
  453. #define VMX_IO_IN (1ul << 3)
  454. #define VMX_IO_OUT 0
  455. #define VMX_IO_STRING (1ul << 4)
  456. #define VMX_IO_REP (1ul << 5)
  457. #define VMX_IO_OPRAND_IMM (1ul << 6)
  458. #define VMX_IO_PORT_MASK 0xFFFF0000
  459. #define VMX_IO_PORT_SHIFT 16
  460. #define VMX_TEST_START 0
  461. #define VMX_TEST_VMEXIT 1
  462. #define VMX_TEST_EXIT 2
  463. #define VMX_TEST_RESUME 3
  464. #define VMX_TEST_VMABORT 4
  465. #define VMX_TEST_VMSKIP 5
  466. #define HYPERCALL_BIT (1ul << 12)
  467. #define HYPERCALL_MASK 0xFFF
  468. #define HYPERCALL_VMEXIT 0x1
  469. #define HYPERCALL_VMABORT 0x2
  470. #define HYPERCALL_VMSKIP 0x3
  471. #define EPTP_PG_WALK_LEN_SHIFT 3ul
  472. #define EPTP_AD_FLAG (1ul << 6)
  473. #define EPT_MEM_TYPE_UC 0ul
  474. #define EPT_MEM_TYPE_WC 1ul
  475. #define EPT_MEM_TYPE_WT 4ul
  476. #define EPT_MEM_TYPE_WP 5ul
  477. #define EPT_MEM_TYPE_WB 6ul
  478. #define EPT_RA 1ul
  479. #define EPT_WA 2ul
  480. #define EPT_EA 4ul
  481. #define EPT_PRESENT (EPT_RA | EPT_WA | EPT_EA)
  482. #define EPT_ACCESS_FLAG (1ul << 8)
  483. #define EPT_DIRTY_FLAG (1ul << 9)
  484. #define EPT_LARGE_PAGE (1ul << 7)
  485. #define EPT_MEM_TYPE_SHIFT 3ul
  486. #define EPT_IGNORE_PAT (1ul << 6)
  487. #define EPT_SUPPRESS_VE (1ull << 63)
  488. #define EPT_CAP_WT 1ull
  489. #define EPT_CAP_PWL4 (1ull << 6)
  490. #define EPT_CAP_UC (1ull << 8)
  491. #define EPT_CAP_WB (1ull << 14)
  492. #define EPT_CAP_2M_PAGE (1ull << 16)
  493. #define EPT_CAP_1G_PAGE (1ull << 17)
  494. #define EPT_CAP_INVEPT (1ull << 20)
  495. #define EPT_CAP_INVEPT_SINGLE (1ull << 25)
  496. #define EPT_CAP_INVEPT_ALL (1ull << 26)
  497. #define EPT_CAP_AD_FLAG (1ull << 21)
  498. #define VPID_CAP_INVVPID (1ull << 32)
  499. #define VPID_CAP_INVVPID_ADDR (1ull << 40)
  500. #define VPID_CAP_INVVPID_CXTGLB (1ull << 41)
  501. #define VPID_CAP_INVVPID_ALL (1ull << 42)
  502. #define VPID_CAP_INVVPID_CXTLOC (1ull << 43)
  503. #define PAGE_SIZE_2M (512 * PAGE_SIZE)
  504. #define PAGE_SIZE_1G (512 * PAGE_SIZE_2M)
  505. #define EPT_PAGE_LEVEL 4
  506. #define EPT_PGDIR_WIDTH 9
  507. #define EPT_PGDIR_MASK 511
  508. #define EPT_PGDIR_ENTRIES (1 << EPT_PGDIR_WIDTH)
  509. #define EPT_LEVEL_SHIFT(level) (((level)-1) * EPT_PGDIR_WIDTH + 12)
  510. #define EPT_ADDR_MASK GENMASK_ULL(51, 12)
  511. #define PAGE_MASK_2M (~(PAGE_SIZE_2M-1))
  512. #define EPT_VLT_RD 1
  513. #define EPT_VLT_WR (1 << 1)
  514. #define EPT_VLT_FETCH (1 << 2)
  515. #define EPT_VLT_PERM_RD (1 << 3)
  516. #define EPT_VLT_PERM_WR (1 << 4)
  517. #define EPT_VLT_PERM_EX (1 << 5)
  518. #define EPT_VLT_PERMS (EPT_VLT_PERM_RD | EPT_VLT_PERM_WR | \
  519. EPT_VLT_PERM_EX)
  520. #define EPT_VLT_LADDR_VLD (1 << 7)
  521. #define EPT_VLT_PADDR (1 << 8)
  522. #define MAGIC_VAL_1 0x12345678ul
  523. #define MAGIC_VAL_2 0x87654321ul
  524. #define MAGIC_VAL_3 0xfffffffful
  525. #define MAGIC_VAL_4 0xdeadbeeful
  526. #define INVEPT_SINGLE 1
  527. #define INVEPT_GLOBAL 2
  528. #define INVVPID_ADDR 0
  529. #define INVVPID_CONTEXT_GLOBAL 1
  530. #define INVVPID_ALL 2
  531. #define INVVPID_CONTEXT_LOCAL 3
  532. #define ACTV_ACTIVE 0
  533. #define ACTV_HLT 1
  534. extern struct regs regs;
  535. extern union vmx_basic basic;
  536. extern union vmx_ctrl_msr ctrl_pin_rev;
  537. extern union vmx_ctrl_msr ctrl_cpu_rev[2];
  538. extern union vmx_ctrl_msr ctrl_exit_rev;
  539. extern union vmx_ctrl_msr ctrl_enter_rev;
  540. extern union vmx_ept_vpid ept_vpid;
  541. extern u64 *vmxon_region;
  542. void vmx_set_test_stage(u32 s);
  543. u32 vmx_get_test_stage(void);
  544. void vmx_inc_test_stage(void);
  545. static int vmx_on(void)
  546. {
  547. bool ret;
  548. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  549. asm volatile ("push %1; popf; vmxon %2; setbe %0\n\t"
  550. : "=q" (ret) : "q" (rflags), "m" (vmxon_region) : "cc");
  551. return ret;
  552. }
  553. static int vmx_off(void)
  554. {
  555. bool ret;
  556. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  557. asm volatile("push %1; popf; vmxoff; setbe %0\n\t"
  558. : "=q"(ret) : "q" (rflags) : "cc");
  559. return ret;
  560. }
  561. static inline int make_vmcs_current(struct vmcs *vmcs)
  562. {
  563. bool ret;
  564. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  565. asm volatile ("push %1; popf; vmptrld %2; setbe %0"
  566. : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
  567. return ret;
  568. }
  569. static inline int vmcs_clear(struct vmcs *vmcs)
  570. {
  571. bool ret;
  572. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  573. asm volatile ("push %1; popf; vmclear %2; setbe %0"
  574. : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
  575. return ret;
  576. }
  577. static inline u64 vmcs_read(enum Encoding enc)
  578. {
  579. u64 val;
  580. asm volatile ("vmread %1, %0" : "=rm" (val) : "r" ((u64)enc) : "cc");
  581. return val;
  582. }
  583. static inline int vmcs_read_checking(enum Encoding enc, u64 *value)
  584. {
  585. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  586. u64 encoding = enc;
  587. u64 val;
  588. asm volatile ("shl $8, %%rax;"
  589. "sahf;"
  590. "vmread %[encoding], %[val];"
  591. "lahf;"
  592. "shr $8, %%rax"
  593. : /* output */ [val]"=rm"(val), "+a"(rflags)
  594. : /* input */ [encoding]"r"(encoding)
  595. : /* clobber */ "cc");
  596. *value = val;
  597. return rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF);
  598. }
  599. static inline int vmcs_write(enum Encoding enc, u64 val)
  600. {
  601. bool ret;
  602. asm volatile ("vmwrite %1, %2; setbe %0"
  603. : "=q"(ret) : "rm" (val), "r" ((u64)enc) : "cc");
  604. return ret;
  605. }
  606. static inline int vmcs_save(struct vmcs **vmcs)
  607. {
  608. bool ret;
  609. unsigned long pa;
  610. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  611. asm volatile ("push %2; popf; vmptrst %1; setbe %0"
  612. : "=q" (ret), "=m" (pa) : "r" (rflags) : "cc");
  613. *vmcs = (pa == -1ull) ? NULL : phys_to_virt(pa);
  614. return ret;
  615. }
  616. static inline bool invept(unsigned long type, u64 eptp)
  617. {
  618. bool ret;
  619. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  620. struct {
  621. u64 eptp, gpa;
  622. } operand = {eptp, 0};
  623. asm volatile("push %1; popf; invept %2, %3; setbe %0"
  624. : "=q" (ret) : "r" (rflags), "m"(operand),"r"(type) : "cc");
  625. return ret;
  626. }
  627. static inline bool invvpid(unsigned long type, u64 vpid, u64 gla)
  628. {
  629. bool ret;
  630. u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
  631. struct invvpid_operand operand = {vpid, gla};
  632. asm volatile("push %1; popf; invvpid %2, %3; setbe %0"
  633. : "=q" (ret) : "r" (rflags), "m"(operand),"r"(type) : "cc");
  634. return ret;
  635. }
  636. const char *exit_reason_description(u64 reason);
  637. void print_vmexit_info();
  638. void print_vmentry_failure_info(struct vmentry_failure *failure);
  639. void ept_sync(int type, u64 eptp);
  640. void vpid_sync(int type, u16 vpid);
  641. void install_ept_entry(unsigned long *pml4, int pte_level,
  642. unsigned long guest_addr, unsigned long pte,
  643. unsigned long *pt_page);
  644. void install_1g_ept(unsigned long *pml4, unsigned long phys,
  645. unsigned long guest_addr, u64 perm);
  646. void install_2m_ept(unsigned long *pml4, unsigned long phys,
  647. unsigned long guest_addr, u64 perm);
  648. void install_ept(unsigned long *pml4, unsigned long phys,
  649. unsigned long guest_addr, u64 perm);
  650. void setup_ept_range(unsigned long *pml4, unsigned long start,
  651. unsigned long len, int map_1g, int map_2m, u64 perm);
  652. bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
  653. unsigned long *pte);
  654. void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
  655. int level, u64 pte_val);
  656. void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
  657. unsigned long guest_addr, int expected_gpa_ad,
  658. int expected_pt_ad);
  659. void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
  660. unsigned long guest_addr);
  661. bool ept_2m_supported(void);
  662. bool ept_1g_supported(void);
  663. bool ept_huge_pages_supported(int level);
  664. bool ept_execute_only_supported(void);
  665. bool ept_ad_bits_supported(void);
  666. void enter_guest(void);
  667. typedef void (*test_guest_func)(void);
  668. typedef void (*test_teardown_func)(void *data);
  669. void test_set_guest(test_guest_func func);
  670. void test_add_teardown(test_teardown_func func, void *data);
  671. void test_skip(const char *msg);
  672. void __abort_test(void);
  673. #define TEST_ASSERT(cond) \
  674. do { \
  675. if (!(cond)) { \
  676. report("%s:%d: Assertion failed: %s", 0, \
  677. __FILE__, __LINE__, #cond); \
  678. dump_stack(); \
  679. __abort_test(); \
  680. } \
  681. report_pass(); \
  682. } while (0)
  683. #define TEST_ASSERT_MSG(cond, fmt, args...) \
  684. do { \
  685. if (!(cond)) { \
  686. report("%s:%d: Assertion failed: %s\n" fmt, 0, \
  687. __FILE__, __LINE__, #cond, ##args); \
  688. dump_stack(); \
  689. __abort_test(); \
  690. } \
  691. report_pass(); \
  692. } while (0)
  693. #define __TEST_EQ(a, b, a_str, b_str, assertion, fmt, args...) \
  694. do { \
  695. typeof(a) _a = a; \
  696. typeof(b) _b = b; \
  697. if (_a != _b) { \
  698. char _bin_a[BINSTR_SZ]; \
  699. char _bin_b[BINSTR_SZ]; \
  700. binstr(_a, _bin_a); \
  701. binstr(_b, _bin_b); \
  702. report("%s:%d: %s failed: (%s) == (%s)\n" \
  703. "\tLHS: %#018lx - %s - %lu\n" \
  704. "\tRHS: %#018lx - %s - %lu%s" fmt, 0, \
  705. __FILE__, __LINE__, \
  706. assertion ? "Assertion" : "Expectation", a_str, b_str, \
  707. (unsigned long) _a, _bin_a, (unsigned long) _a, \
  708. (unsigned long) _b, _bin_b, (unsigned long) _b, \
  709. fmt[0] == '\0' ? "" : "\n", ## args); \
  710. dump_stack(); \
  711. if (assertion) \
  712. __abort_test(); \
  713. } \
  714. report_pass(); \
  715. } while (0)
  716. #define TEST_ASSERT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 1, "")
  717. #define TEST_ASSERT_EQ_MSG(a, b, fmt, args...) \
  718. __TEST_EQ(a, b, #a, #b, 1, fmt, ## args)
  719. #define TEST_EXPECT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 0, "")
  720. #define TEST_EXPECT_EQ_MSG(a, b, fmt, args...) \
  721. __TEST_EQ(a, b, #a, #b, 0, fmt, ## args)
  722. #endif