svm.c

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];
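
/* CPUID Fn8000_000A EDX bit 0 advertises nested paging (NPT) support. */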
static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}
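
/*
 * Enable SVM on this CPU: register a host save area, set EFER.SVME (and
 * EFER.NX), and, when NPT is available, build a nested page table that
 * identity-maps the first 4GB with 4k pages so individual tests can
 * tweak single entries.
 */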
static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - Build a nested page table
     * Build the page-table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit-tests.
     */
    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();
        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;
        pte[i] = page;
    }

    /* PDE level */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();
        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
        pde[i] = page;
    }

    /* PDPe level */
    pdpe = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}
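
/*
 * Look up the nested-page-table entry (PDE or PTE) that maps a given
 * guest-physical address, so tests can flip permission bits on it.
 */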
static u64 *npt_get_pde(u64 address)
{
    int i1, i2;

    address >>= 21;
    i1 = (address >> 9) & 0x3;
    i2 = address & 0x1ff;

    return &pde[i1][i2];
}

static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}
static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}
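
/*
 * Initialize a VMCB so the guest starts as a near copy of the current
 * host state (segments, GDT/IDT, control and debug registers, EFER, PAT),
 * with only VMRUN and VMMCALL intercepted.  Individual tests add further
 * intercepts in their prepare() callbacks.
 */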
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}
static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

struct regs {
    u64 rax;
    u64 rcx;
    u64 rdx;
    u64 rbx;
    u64 cr2;
    u64 rbp;
    u64 rsi;
    u64 rdi;
    u64 r8;
    u64 r9;
    u64 r10;
    u64 r11;
    u64 r12;
    u64 r13;
    u64 r14;
    u64 r15;
    u64 rflags;
};

struct regs regs;
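
/*
 * Exchange the guest GPRs with the global 'regs' struct around VMRUN.
 * The offsets below are the byte offsets of the corresponding fields in
 * struct regs; because xchg swaps both ways, the same macro serves as
 * both the load and the save side.
 */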
// rax handled specially below
#define SAVE_GPR_C                  \
    "xchg %%rcx, regs+0x8\n\t"      \
    "xchg %%rdx, regs+0x10\n\t"     \
    "xchg %%rbx, regs+0x18\n\t"     \
    "xchg %%rbp, regs+0x28\n\t"     \
    "xchg %%rsi, regs+0x30\n\t"     \
    "xchg %%rdi, regs+0x38\n\t"     \
    "xchg %%r8, regs+0x40\n\t"      \
    "xchg %%r9, regs+0x48\n\t"      \
    "xchg %%r10, regs+0x50\n\t"     \
    "xchg %%r11, regs+0x58\n\t"     \
    "xchg %%r12, regs+0x60\n\t"     \
    "xchg %%r13, regs+0x68\n\t"     \
    "xchg %%r14, regs+0x70\n\t"     \
    "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C SAVE_GPR_C
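
/*
 * Run one test: point the VMCB at test_thunk() with a private stack,
 * then loop CLGI/VMLOAD/VMRUN/VMSAVE/STGI until the test's finished()
 * callback says the guest is done, and report the result of its
 * succeeded() callback.  Guest rflags and rax are copied via the VMCB
 * (the 0x170/0x1f8 offsets below); the remaining GPRs go through 'regs'.
 */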
static void test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    report("%s", test->succeeded(test), test->name);
}
static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}
static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}
static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}
static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}
static bool next_rip_supported(void)
{
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}
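
/*
 * The mode-switch test walks the guest from 64-bit long mode down to
 * real mode (clear PG, LME, PAE, PE) and back up again, issuing VMMCALL
 * once in real mode and once back in long mode so the host side can
 * sanity-check CR0/CR4/EFER at each stop.
 */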
static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                              | (1ULL << UD_VECTOR)
                                              | (1ULL << DF_VECTOR)
                                              | (1ULL << PF_VECTOR);
    test->scratch = 0;
}

static void test_mode_switch(struct test *test)
{
    asm volatile(" cli\n"
                 " ljmp *1f\n" /* jump to 32-bit code segment */
                 "1:\n"
                 " .long 2f\n"
                 " .long " xstr(KERNEL_CS32) "\n"
                 ".code32\n"
                 "2:\n"
                 " movl %%cr0, %%eax\n"
                 " btcl $31, %%eax\n" /* clear PG */
                 " movl %%eax, %%cr0\n"
                 " movl $0xc0000080, %%ecx\n" /* EFER */
                 " rdmsr\n"
                 " btcl $8, %%eax\n" /* clear LME */
                 " wrmsr\n"
                 " movl %%cr4, %%eax\n"
                 " btcl $5, %%eax\n" /* clear PAE */
                 " movl %%eax, %%cr4\n"
                 " movw %[ds16], %%ax\n"
                 " movw %%ax, %%ds\n"
                 " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
                 ".code16\n"
                 "3:\n"
                 " movl %%cr0, %%eax\n"
                 " btcl $0, %%eax\n" /* clear PE */
                 " movl %%eax, %%cr0\n"
                 " ljmpl $0, $4f\n"   /* jump to real-mode */
                 "4:\n"
                 " vmmcall\n"
                 " movl %%cr0, %%eax\n"
                 " btsl $0, %%eax\n" /* set PE */
                 " movl %%eax, %%cr0\n"
                 " ljmpl %[cs32], $5f\n" /* back to protected mode */
                 ".code32\n"
                 "5:\n"
                 " movl %%cr4, %%eax\n"
                 " btsl $5, %%eax\n" /* set PAE */
                 " movl %%eax, %%cr4\n"
                 " movl $0xc0000080, %%ecx\n" /* EFER */
                 " rdmsr\n"
                 " btsl $8, %%eax\n" /* set LME */
                 " wrmsr\n"
                 " movl %%cr0, %%eax\n"
                 " btsl $31, %%eax\n" /* set PG */
                 " movl %%eax, %%cr0\n"
                 " ljmpl %[cs64], $6f\n"    /* back to long mode */
                 ".code64\n\t"
                 "6:\n"
                 " vmmcall\n"
                 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
                    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
                 : "rax", "rbx", "rcx", "rdx", "memory");
}
static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    case 2:
        /* Test should be back in long-mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}
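
/*
 * IOIO test: the I/O permissions map holds one intercept bit per port.
 * The guest pokes various ports and access widths; on every IOIO exit,
 * ioio_finished() clears the offending bits and bumps the stage counter
 * so the same access succeeds when the guest is resumed.
 */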
static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}
static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;
    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;
    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;
    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;
    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;
    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;
    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    report("stage %d", false, get_test_stage(test));
    test->scratch = -1;
}
static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;
    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}
static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}
static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0 = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed, not sure what to do now because we
     * are not in guest-mode anymore so we can't trigger an intercept.
     * Trigger a triple fault for now.
     */
    report("sel_cr0 test. Cannot recover from this - exiting", false);
    exit(report_summary());
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}
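
/*
 * NPT tests: each prepare() clears or sets a permission bit (NX, U/S,
 * R/W, or a reserved bit) in the nested page table entry for the address
 * the guest will touch; each check() restores the entry and verifies the
 * exit was SVM_EXIT_NPF with the expected fault code in exit_info_1.
 */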
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}
u64 save_pde;

static void npt_rsvd_prepare(struct test *test)
{
    u64 *pde;

    vmcb_ident(test->vmcb);
    pde = npt_get_pde((u64) null_test);

    save_pde = *pde;
    *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
}

static bool npt_rsvd_check(struct test *test)
{
    u64 *pde = npt_get_pde((u64) null_test);

    *pde = save_pde;

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
}
static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_rw_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}
static void npt_rsvd_pfwalk_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
}

static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0xfee00080);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00080);

    *data = *data;
}

static bool npt_rw_l1mmio_check(struct test *test)
{
    u64 *pte = npt_get_pte(0xfee00080);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}
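
/*
 * Latency tests: latency_test()/latency_finished() ping-pong between
 * guest and host via VMMCALL, timing VMRUN and #VMEXIT with rdtsc over
 * LATENCY_RUNS iterations; lat_svm_insn_finished() times VMLOAD, VMSAVE,
 * STGI and CLGI executed from the host.
 */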
static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();
    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;
    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();
    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;
    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf(" Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
           latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf(" Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
           latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}
static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}
static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
           latvmload_min, vmload_sum / LATENCY_RUNS);
    printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
           latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf(" Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
           latstgi_min, stgi_sum / LATENCY_RUNS);
    printf(" Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
           latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}
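
/*
 * Test table consumed by main(): name, supported(), prepare(), guest
 * function, finished() and succeeded() callbacks for each test.
 */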
static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
      default_finished, npt_rsvd_pfwalk_check },
    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
      default_finished, npt_rw_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
      default_finished, npt_rw_l1mmio_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};
int main(int ac, char **av)
{
    int i, nr;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!(cpuid(0x80000001).c & 4)) { /* CPUID Fn8000_0001 ECX bit 2: SVM */
        printf("SVM not available\n");
        return report_summary();
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        test_run(&tests[i], vmcb);
    }

    return report_summary();
}