quic_reactor.c
/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "internal/quic_reactor.h"
#include "internal/common.h"
#include "internal/thread_arch.h"

/*
 * Core I/O Reactor Framework
 * ==========================
 */
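
/*
 * Initialise the reactor. The tick callback is invoked on every tick and
 * reports, via the QUIC_TICK_RESULT it fills in, whether network reads and
 * writes are currently desired and when the next tick is due. The callback is
 * assumed to be non-NULL, as it is invoked unconditionally.
 */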
void ossl_quic_reactor_init(QUIC_REACTOR *rtor,
                            void (*tick_cb)(QUIC_TICK_RESULT *res, void *arg,
                                            uint32_t flags),
                            void *tick_cb_arg,
                            OSSL_TIME initial_tick_deadline)
{
    rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
    rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
    rtor->net_read_desired = 0;
    rtor->net_write_desired = 0;
    rtor->can_poll_r = 0;
    rtor->can_poll_w = 0;
    rtor->tick_deadline = initial_tick_deadline;

    rtor->tick_cb = tick_cb;
    rtor->tick_cb_arg = tick_cb_arg;
}
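
/*
 * Set the read/write poll descriptor used by the reactor. Passing NULL clears
 * the descriptor. The corresponding can_poll_r/can_poll_w flag is recomputed
 * on each call, as only descriptor types we know how to poll on can be used
 * for blocking waits.
 */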
void ossl_quic_reactor_set_poll_r(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *r)
{
    if (r == NULL)
        rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
    else
        rtor->poll_r = *r;

    rtor->can_poll_r
        = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_r);
}

void ossl_quic_reactor_set_poll_w(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *w)
{
    if (w == NULL)
        rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
    else
        rtor->poll_w = *w;

    rtor->can_poll_w
        = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_w);
}
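
/*
 * Accessors for the poll descriptors, pollability flags, and the I/O demands
 * and tick deadline most recently reported by the tick callback.
 */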
const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_r(const QUIC_REACTOR *rtor)
{
    return &rtor->poll_r;
}

const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_w(const QUIC_REACTOR *rtor)
{
    return &rtor->poll_w;
}

int ossl_quic_reactor_can_support_poll_descriptor(const QUIC_REACTOR *rtor,
                                                  const BIO_POLL_DESCRIPTOR *d)
{
    return d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD;
}

int ossl_quic_reactor_can_poll_r(const QUIC_REACTOR *rtor)
{
    return rtor->can_poll_r;
}

int ossl_quic_reactor_can_poll_w(const QUIC_REACTOR *rtor)
{
    return rtor->can_poll_w;
}

int ossl_quic_reactor_net_read_desired(QUIC_REACTOR *rtor)
{
    return rtor->net_read_desired;
}

int ossl_quic_reactor_net_write_desired(QUIC_REACTOR *rtor)
{
    return rtor->net_write_desired;
}

OSSL_TIME ossl_quic_reactor_get_tick_deadline(QUIC_REACTOR *rtor)
{
    return rtor->tick_deadline;
}
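
/*
 * Tick the reactor once: invoke the tick callback and cache the I/O demands
 * and next tick deadline it reports. flags is passed through to the callback.
 */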
int ossl_quic_reactor_tick(QUIC_REACTOR *rtor, uint32_t flags)
{
    QUIC_TICK_RESULT res = {0};

    /*
     * Note that the tick callback cannot fail; this is intentional. Arguably
     * it does not make that much sense for ticking to 'fail' (in the sense of
     * an explicit error indicated to the user) because ticking is by its
     * nature best effort. If something fatal happens with a connection we can
     * report it on the next actual application I/O call.
     */
    rtor->tick_cb(&res, rtor->tick_cb_arg, flags);

    rtor->net_read_desired = res.net_read_desired;
    rtor->net_write_desired = res.net_write_desired;
    rtor->tick_deadline = res.tick_deadline;
    return 1;
}
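
/*
 * As an illustrative sketch only (the names below are hypothetical, not part
 * of this file), a tick callback and a caller driving the reactor might look
 * like this:
 *
 *     static void my_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags)
 *     {
 *         // Advance the owning state machine, then report I/O demands and
 *         // the next deadline.
 *         res->net_read_desired  = 1;
 *         res->net_write_desired = 0;
 *         res->tick_deadline     = ossl_time_infinite();
 *     }
 *
 *     ossl_quic_reactor_init(&rtor, my_tick, my_arg, ossl_time_infinite());
 *     ossl_quic_reactor_tick(&rtor, 0);
 *     // ...then wait for FD readiness or the deadline, and tick again.
 */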
/*
 * Blocking I/O Adaptation Layer
 * =============================
 */

/*
 * Utility which can be used to poll on up to two FDs. This is designed to
 * support use of split FDs (e.g. with SSL_set_rfd and SSL_set_wfd where
 * different FDs are used for read and write).
 *
 * Generally use of poll(2) is preferred where available. Windows, however,
 * hasn't traditionally offered poll(2), only select(2). WSAPoll() was
 * introduced in Vista but has seemingly been buggy until relatively recent
 * versions of Windows 10. Moreover we support XP so this is not a suitable
 * target anyway. However, the traditional issues with select(2) turn out not
 * to be an issue on Windows; whereas traditional *NIX select(2) uses a bitmap
 * of FDs (and thus is limited in the magnitude of the FDs expressible),
 * Windows select(2) is very different. In Windows, socket handles are not
 * allocated contiguously from zero and thus this bitmap approach was
 * infeasible. Thus in adapting the Berkeley sockets API to Windows a
 * different approach was taken whereby the fd_set contains a fixed length
 * array of socket handles and an integer indicating how many entries are
 * valid; thus Windows select() ironically is actually much more like *NIX
 * poll(2) than *NIX select(2). In any case, this means that the relevant
 * limit for Windows select() is the number of FDs being polled, not the
 * magnitude of those FDs. Since we only poll for two FDs here, this limit
 * does not concern us.
 *
 * Usage: rfd and wfd may be the same or different. Either or both may also be
 * -1. If rfd_want_read is 1, rfd is polled for readability, and if
 * wfd_want_write is 1, wfd is polled for writability. Note that since any
 * passed FD is always polled for error conditions, setting rfd_want_read=0
 * and wfd_want_write=0 is not the same as passing -1 for both FDs.
 *
 * deadline is a timestamp to return at. If it is ossl_time_infinite(), the
 * call never times out.
 *
 * Returns 0 on error and 1 on success. Timeout expiry is considered a success
 * condition. We don't elaborate our return values here because the way we are
 * actually using this doesn't currently care.
 *
 * If mutex is non-NULL, it is assumed to be held for write and is unlocked
 * for the duration of the call.
 *
 * Precondition:   mutex is NULL or is held for write (unchecked)
 * Postcondition:  mutex is NULL or is held for write (unless
 *                   CRYPTO_THREAD_write_lock fails)
 */
static int poll_two_fds(int rfd, int rfd_want_read,
                        int wfd, int wfd_want_write,
                        OSSL_TIME deadline,
                        CRYPTO_MUTEX *mutex)
{
#if defined(OPENSSL_SYS_WINDOWS) || !defined(POLLIN)
    fd_set rfd_set, wfd_set, efd_set;
    OSSL_TIME now, timeout;
    struct timeval tv, *ptv;
    int maxfd, pres;

# ifndef OPENSSL_SYS_WINDOWS
    /*
     * On Windows there is no relevant limit to the magnitude of a fd value
     * (see above). On *NIX the fd_set uses a bitmap and we must check the
     * limit.
     */
    if (rfd >= FD_SETSIZE || wfd >= FD_SETSIZE)
        return 0;
# endif

    FD_ZERO(&rfd_set);
    FD_ZERO(&wfd_set);
    FD_ZERO(&efd_set);

    if (rfd != -1 && rfd_want_read)
        openssl_fdset(rfd, &rfd_set);
    if (wfd != -1 && wfd_want_write)
        openssl_fdset(wfd, &wfd_set);

    /* Always check for error conditions. */
    if (rfd != -1)
        openssl_fdset(rfd, &efd_set);
    if (wfd != -1)
        openssl_fdset(wfd, &efd_set);

    maxfd = rfd;
    if (wfd > maxfd)
        maxfd = wfd;

    if (!ossl_assert(rfd != -1 || wfd != -1
                     || !ossl_time_is_infinite(deadline)))
        /* Do not block forever; should not happen. */
        return 0;

# if defined(OPENSSL_THREADS)
    if (mutex != NULL)
        ossl_crypto_mutex_unlock(mutex);
# endif

    do {
        /*
         * select expects a timeout, not a deadline, so do the conversion.
         * Update for each call to ensure the correct value is used if we
         * repeat due to EINTR.
         */
        if (ossl_time_is_infinite(deadline)) {
            ptv = NULL;
        } else {
            now = ossl_time_now();

            /*
             * ossl_time_subtract saturates to zero so we don't need to check
             * if now > deadline.
             */
            timeout = ossl_time_subtract(deadline, now);
            tv = ossl_time_to_timeval(timeout);
            ptv = &tv;
        }

        pres = select(maxfd + 1, &rfd_set, &wfd_set, &efd_set, ptv);
    } while (pres == -1 && get_last_socket_error_is_eintr());

# if defined(OPENSSL_THREADS)
    if (mutex != NULL)
        ossl_crypto_mutex_lock(mutex);
# endif

    return pres < 0 ? 0 : 1;
#else
    int pres, timeout_ms;
    OSSL_TIME now, timeout;
    struct pollfd pfds[2] = {0};
    size_t npfd = 0;

    if (rfd == wfd) {
        /* Both FDs are the same; poll a single entry for both conditions. */
        pfds[npfd].fd = rfd;
        pfds[npfd].events = (rfd_want_read ? POLLIN : 0)
                            | (wfd_want_write ? POLLOUT : 0);
        if (rfd >= 0 && pfds[npfd].events != 0)
            ++npfd;
    } else {
        /* Different FDs; use a separate entry for each. */
        pfds[npfd].fd = rfd;
        pfds[npfd].events = (rfd_want_read ? POLLIN : 0);
        if (rfd >= 0 && pfds[npfd].events != 0)
            ++npfd;

        pfds[npfd].fd = wfd;
        pfds[npfd].events = (wfd_want_write ? POLLOUT : 0);
        if (wfd >= 0 && pfds[npfd].events != 0)
            ++npfd;
    }

    if (!ossl_assert(npfd != 0 || !ossl_time_is_infinite(deadline)))
        /* Do not block forever; should not happen. */
        return 0;

# if defined(OPENSSL_THREADS)
    if (mutex != NULL)
        ossl_crypto_mutex_unlock(mutex);
# endif

    do {
        /* As above, convert the deadline to a timeout on each iteration. */
        if (ossl_time_is_infinite(deadline)) {
            timeout_ms = -1;
        } else {
            now = ossl_time_now();
            timeout = ossl_time_subtract(deadline, now);
            timeout_ms = ossl_time2ms(timeout);
        }

        pres = poll(pfds, npfd, timeout_ms);
    } while (pres == -1 && get_last_socket_error_is_eintr());

# if defined(OPENSSL_THREADS)
    if (mutex != NULL)
        ossl_crypto_mutex_lock(mutex);
# endif

    return pres < 0 ? 0 : 1;
#endif
}
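
/*
 * Translate an abstract poll descriptor into a raw FD for poll_two_fds(). A
 * NULL or 'none' descriptor maps to INVALID_SOCKET, meaning "do not poll";
 * any other descriptor type we cannot handle is an error.
 */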
static int poll_descriptor_to_fd(const BIO_POLL_DESCRIPTOR *d, int *fd)
{
    if (d == NULL || d->type == BIO_POLL_DESCRIPTOR_TYPE_NONE) {
        *fd = INVALID_SOCKET;
        return 1;
    }

    if (d->type != BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD
        || d->value.fd == INVALID_SOCKET)
        return 0;

    *fd = d->value.fd;
    return 1;
}
/*
 * Poll up to two abstract poll descriptors. Currently we only support
 * poll descriptors which represent FDs.
 *
 * If mutex is non-NULL, it is assumed to be a lock currently held for write
 * and is unlocked for the duration of any wait.
 *
 * Precondition:   mutex is NULL or is held for write (unchecked)
 * Postcondition:  mutex is NULL or is held for write (unless
 *                   CRYPTO_THREAD_write_lock fails)
 */
static int poll_two_descriptors(const BIO_POLL_DESCRIPTOR *r, int r_want_read,
                                const BIO_POLL_DESCRIPTOR *w, int w_want_write,
                                OSSL_TIME deadline,
                                CRYPTO_MUTEX *mutex)
{
    int rfd, wfd;

    if (!poll_descriptor_to_fd(r, &rfd)
        || !poll_descriptor_to_fd(w, &wfd))
        return 0;

    return poll_two_fds(rfd, r_want_read, wfd, w_want_write, deadline, mutex);
}
/*
 * Block until a predicate function evaluates to true.
 *
 * If mutex is non-NULL, it is assumed to be a lock currently held for write
 * and is unlocked for the duration of any wait.
 *
 * Precondition:   Must hold channel write lock (unchecked)
 * Precondition:   mutex is NULL or is held for write (unchecked)
 * Postcondition:  mutex is NULL or is held for write (unless
 *                   CRYPTO_THREAD_write_lock fails)
 */
int ossl_quic_reactor_block_until_pred(QUIC_REACTOR *rtor,
                                       int (*pred)(void *arg), void *pred_arg,
                                       uint32_t flags,
                                       CRYPTO_MUTEX *mutex)
{
    int res, net_read_desired, net_write_desired;
    OSSL_TIME tick_deadline;

    for (;;) {
        if ((flags & SKIP_FIRST_TICK) != 0)
            flags &= ~SKIP_FIRST_TICK;
        else
            /* best effort */
            ossl_quic_reactor_tick(rtor, 0);

        if ((res = pred(pred_arg)) != 0)
            return res;

        net_read_desired = ossl_quic_reactor_net_read_desired(rtor);
        net_write_desired = ossl_quic_reactor_net_write_desired(rtor);
        tick_deadline = ossl_quic_reactor_get_tick_deadline(rtor);

        if (!net_read_desired && !net_write_desired
            && ossl_time_is_infinite(tick_deadline))
            /* Can't wait if there is nothing to wait for. */
            return 0;

        if (!poll_two_descriptors(ossl_quic_reactor_get_poll_r(rtor),
                                  net_read_desired,
                                  ossl_quic_reactor_get_poll_w(rtor),
                                  net_write_desired,
                                  tick_deadline,
                                  mutex))
            /*
             * We don't actually care why the call succeeded (timeout, FD
             * readiness), we just call reactor_tick and start trying to do
             * I/O things again. If poll_two_descriptors returns 0, this is
             * some other non-timeout failure and we should stop here.
             *
             * TODO(QUIC FUTURE): In the future we could avoid unnecessary
             * syscalls by not retrying network I/O that isn't ready based
             * on the result of the poll call. However this might be difficult
             * because it requires we do the call to poll(2) or equivalent
             * syscall ourselves, whereas in the general case the application
             * does the polling and just calls SSL_handle_events().
             * Implementing this optimisation in the future will probably
             * therefore require API changes.
             */
            return 0;
    }
}
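
/*
 * As an illustrative sketch only (the predicate and structure names below are
 * hypothetical), a caller might block until some condition tracked by the
 * ticker becomes true:
 *
 *     static int conn_is_closed(void *arg)
 *     {
 *         struct my_conn *c = arg;
 *
 *         return c->closed; // re-evaluated after each tick
 *     }
 *
 *     ossl_quic_reactor_block_until_pred(&c->rtor, conn_is_closed, c, 0,
 *                                        c->mutex);
 */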