/* hyperv_connections.c - Hyper-V SynIC message and event connection test */

#include "libcflat.h"
#include "vm.h"
#include "smp.h"
#include "isr.h"
#include "atomic.h"
#include "hyperv.h"
#include "bitops.h"
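
/*
 * Test parameters: the interrupt vectors and SINTs used for message and
 * event delivery, the base connection IDs (one connection per vCPU is
 * created on top of each base), an arbitrary message type, and a busy-wait
 * budget that gives the hypervisor time to complete delivery.
 */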
#define MAX_CPUS 64

#define MSG_VEC 0xb0
#define EVT_VEC 0xb1
#define MSG_SINT 0x8
#define EVT_SINT 0x9
#define MSG_CONN_BASE 0x10
#define EVT_CONN_BASE 0x20
#define MSG_TYPE 0x12345678

#define WAIT_CYCLES 10000000

static atomic_t ncpus_done;
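
/*
 * Per-vCPU state: the SynIC message and event flags pages, the prepared
 * HvPostMessage input, this vCPU's connection IDs, the status of the last
 * hypercall, and a counter of SINT interrupts received.
 */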
struct hv_vcpu {
	struct hv_message_page *msg_page;
	struct hv_event_flags_page *evt_page;
	struct hv_input_post_message *post_msg;
	u8 msg_conn;
	u8 evt_conn;
	u64 hvcall_status;
	atomic_t sint_received;
};

static struct hv_vcpu hv_vcpus[MAX_CPUS];
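
/* SINT ISR: count interrupts delivered through the SynIC on this vCPU. */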
static void sint_isr(isr_regs_t *regs)
{
	atomic_inc(&hv_vcpus[smp_id()].sint_received);
}

static void *hypercall_page;
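
/*
 * Set the guest OS ID and map the hypercall page; the hypervisor fills the
 * page with the hypercall trampoline once HV_X64_MSR_HYPERCALL is written
 * with the enable bit set.
 */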
static void setup_hypercall(void)
{
	u64 guestid = (0x8f00ull << 48);

	hypercall_page = alloc_page();
	if (!hypercall_page)
		report_abort("failed to allocate hypercall page");
	memset(hypercall_page, 0, PAGE_SIZE);

	wrmsr(HV_X64_MSR_GUEST_OS_ID, guestid);
	wrmsr(HV_X64_MSR_HYPERCALL,
	      (u64)virt_to_phys(hypercall_page) | HV_X64_MSR_HYPERCALL_ENABLE);
}

static void teardown_hypercall(void)
{
	wrmsr(HV_X64_MSR_HYPERCALL, 0);
	wrmsr(HV_X64_MSR_GUEST_OS_ID, 0);
	free_page(hypercall_page);
}
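
/*
 * Issue a Hyper-V hypercall through the hypercall page.  On x86_64 the
 * control word goes in RCX and the input parameter (a GPA, or the value
 * itself for fast calls) in RDX; the result comes back in RAX.  On 32-bit
 * the control and result use EDX:EAX, the input is split across EBX:ECX,
 * and the output GPA in EDI:ESI is zeroed.
 */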
static u64 do_hypercall(u16 code, u64 arg, bool fast)
{
	u64 ret;
	u64 ctl = code;

	if (fast)
		ctl |= HV_HYPERCALL_FAST;

	asm volatile ("call *%[hcall_page]"
#ifdef __x86_64__
		      "\n mov $0,%%r8"
		      : "=a"(ret)
		      : "c"(ctl), "d"(arg),
#else
		      : "=A"(ret)
		      : "A"(ctl),
			"b" ((u32)(arg >> 32)), "c" ((u32)arg),
			"D"(0), "S"(0),
#endif
		      [hcall_page] "m" (hypercall_page)
#ifdef __x86_64__
		      : "r8"
#endif
		     );

	return ret;
}
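
/*
 * Per-vCPU setup, run on the target CPU: enable the SynIC message and
 * event flags pages (SIMP/SIEFP) and SynIC control, create one message and
 * one event connection for this vCPU, and prepare the HvPostMessage input
 * that later tests will send to it.
 */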
static void setup_cpu(void *ctx)
{
	int vcpu;
	struct hv_vcpu *hv;

	write_cr3((ulong)ctx);
	irq_enable();

	vcpu = smp_id();
	hv = &hv_vcpus[vcpu];

	hv->msg_page = alloc_page();
	hv->evt_page = alloc_page();
	hv->post_msg = alloc_page();
	if (!hv->msg_page || !hv->evt_page || !hv->post_msg)
		report_abort("failed to allocate synic pages for vcpu");
	memset(hv->msg_page, 0, sizeof(*hv->msg_page));
	memset(hv->evt_page, 0, sizeof(*hv->evt_page));
	memset(hv->post_msg, 0, sizeof(*hv->post_msg));

	hv->msg_conn = MSG_CONN_BASE + vcpu;
	hv->evt_conn = EVT_CONN_BASE + vcpu;

	wrmsr(HV_X64_MSR_SIMP,
	      (u64)virt_to_phys(hv->msg_page) | HV_SYNIC_SIMP_ENABLE);
	wrmsr(HV_X64_MSR_SIEFP,
	      (u64)virt_to_phys(hv->evt_page) | HV_SYNIC_SIEFP_ENABLE);
	wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);

	msg_conn_create(MSG_SINT, MSG_VEC, hv->msg_conn);
	evt_conn_create(EVT_SINT, EVT_VEC, hv->evt_conn);

	hv->post_msg->connectionid = hv->msg_conn;
	hv->post_msg->message_type = MSG_TYPE;
	hv->post_msg->payload_size = 8;
	hv->post_msg->payload[0] = (u64)vcpu << 16;
}
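
/*
 * Undo setup_cpu() on the current vCPU: destroy the connections, disable
 * the SynIC, and release the per-vCPU pages.
 */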
static void teardown_cpu(void *ctx)
{
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];

	evt_conn_destroy(EVT_SINT, hv->evt_conn);
	msg_conn_destroy(MSG_SINT, hv->msg_conn);

	wrmsr(HV_X64_MSR_SCONTROL, 0);
	wrmsr(HV_X64_MSR_SIEFP, 0);
	wrmsr(HV_X64_MSR_SIMP, 0);

	free_page(hv->post_msg);
	free_page(hv->evt_page);
	free_page(hv->msg_page);
}
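
/*
 * Post a message on the connection owned by vCPU @ctx.  The payload is
 * bumped on every send so the receiver can verify it got the latest one.
 */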
static void do_msg(void *ctx)
{
	int vcpu = (ulong)ctx;
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *msg = hv->post_msg;

	msg->payload[0]++;
	atomic_set(&hv->sint_received, 0);
	hv->hvcall_status = do_hypercall(HVCALL_POST_MESSAGE,
					 virt_to_phys(msg), 0);
	atomic_inc(&ncpus_done);
}
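
/*
 * Consume the pending message on the current vCPU: clear the message slot
 * and write HV_X64_MSR_EOM so the hypervisor can deliver a queued message.
 */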
static void clear_msg(void *ctx)
{
	/* should only be done on the current vcpu */
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	atomic_set(&hv->sint_received, 0);
	msg->header.message_type = 0;
	barrier();
	wrmsr(HV_X64_MSR_EOM, 0);
	atomic_inc(&ncpus_done);
}
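
/*
 * msg_ok(): the slot holds the freshly posted message, nothing further is
 * pending, the hypercall succeeded, and exactly one SINT interrupt was
 * taken.  msg_busy(): the slot still holds the previous message with the
 * pending flag set, and no new interrupt has arrived.
 */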
static bool msg_ok(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *post_msg = hv->post_msg;
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	return msg->header.message_type == post_msg->message_type &&
		msg->header.payload_size == post_msg->payload_size &&
		msg->header.message_flags.msg_pending == 0 &&
		msg->u.payload[0] == post_msg->payload[0] &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 1;
}

static bool msg_busy(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	struct hv_input_post_message *post_msg = hv->post_msg;
	struct hv_message *msg = &hv->msg_page->sint_message[MSG_SINT];

	return msg->header.message_type == post_msg->message_type &&
		msg->header.payload_size == post_msg->payload_size &&
		msg->header.message_flags.msg_pending == 1 &&
		msg->u.payload[0] == post_msg->payload[0] - 1 &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 0;
}
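
/*
 * Signal the event connection owned by vCPU @ctx via a fast HvSignalEvent
 * hypercall.
 */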
static void do_evt(void *ctx)
{
	int vcpu = (ulong)ctx;
	struct hv_vcpu *hv = &hv_vcpus[vcpu];

	atomic_set(&hv->sint_received, 0);
	hv->hvcall_status = do_hypercall(HVCALL_SIGNAL_EVENT,
					 hv->evt_conn, 1);
	atomic_inc(&ncpus_done);
}

static void clear_evt(void *ctx)
{
	/* should only be done on the current vcpu */
	int vcpu = smp_id();
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	atomic_set(&hv->sint_received, 0);
	flags[BIT_WORD(hv->evt_conn)] &= ~BIT_MASK(hv->evt_conn);
	barrier();
	atomic_inc(&ncpus_done);
}
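
/*
 * evt_ok(): exactly this vCPU's event flag is set, the hypercall
 * succeeded, and one SINT interrupt was taken.  evt_busy(): the flag is
 * still set but no new interrupt was delivered, because the event had
 * already been signaled.
 */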
static bool evt_ok(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 1;
}

static bool evt_busy(int vcpu)
{
	struct hv_vcpu *hv = &hv_vcpus[vcpu];
	ulong *flags = hv->evt_page->slot[EVT_SINT].flags;

	return flags[BIT_WORD(hv->evt_conn)] == BIT_MASK(hv->evt_conn) &&
		hv->hvcall_status == 0 &&
		atomic_read(&hv->sint_received) == 0;
}
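
/*
 * Run @func on every vCPU, targeting vCPU (i + dst_add) % ncpus, wait for
 * all of them to finish, busy-wait @wait_cycles to let the hypervisor
 * complete delivery, then count the vCPUs for which @is_ok holds.
 */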
static int run_test(int ncpus, int dst_add, ulong wait_cycles,
		    void (*func)(void *), bool (*is_ok)(int))
{
	int i, ret = 0;

	atomic_set(&ncpus_done, 0);
	for (i = 0; i < ncpus; i++) {
		ulong dst = (i + dst_add) % ncpus;

		on_cpu_async(i, func, (void *)dst);
	}
	while (atomic_read(&ncpus_done) != ncpus)
		pause();

	while (wait_cycles--)
		pause();

	if (is_ok)
		for (i = 0; i < ncpus; i++)
			ret += is_ok(i);
	return ret;
}
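
/*
 * Used to probe for connection support: a fast HvSignalEvent to a bogus
 * connection ID fails with HV_STATUS_INVALID_HYPERCALL_CODE when the
 * hypervisor does not implement the hypercall at all.
 */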
#define HV_STATUS_INVALID_HYPERCALL_CODE 2

int main(int ac, char **av)
{
	int ncpus, ncpus_ok, i;

	if (!synic_supported()) {
		report_skip("Hyper-V SynIC is not supported");
		goto summary;
	}

	setup_vm();
	smp_init();

	ncpus = cpu_count();
	if (ncpus > MAX_CPUS)
		report_abort("# cpus: %d > %d", ncpus, MAX_CPUS);

	handle_irq(MSG_VEC, sint_isr);
	handle_irq(EVT_VEC, sint_isr);

	setup_hypercall();

	if (do_hypercall(HVCALL_SIGNAL_EVENT, 0x1234, 1) ==
	    HV_STATUS_INVALID_HYPERCALL_CODE) {
		report_skip("Hyper-V SynIC connections are not supported");
		goto summary;
	}

	for (i = 0; i < ncpus; i++)
		on_cpu(i, setup_cpu, (void *)read_cr3());

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_msg, msg_ok);
	report("send message to self: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	run_test(ncpus, 0, 0, clear_msg, NULL);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_ok);
	report("send message to another cpu: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_msg, msg_busy);
	report("send message to busy slot: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, clear_msg, msg_ok);
	report("receive pending message: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 0, WAIT_CYCLES, do_evt, evt_ok);
	report("signal event on self: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	run_test(ncpus, 0, 0, clear_evt, NULL);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_ok);
	report("signal event on another cpu: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	ncpus_ok = run_test(ncpus, 1, WAIT_CYCLES, do_evt, evt_busy);
	report("signal event already set: %d/%d",
	       ncpus_ok == ncpus, ncpus_ok, ncpus);

	for (i = 0; i < ncpus; i++)
		on_cpu(i, teardown_cpu, NULL);

	teardown_hypercall();

summary:
	return report_summary();
}