quic_record_rx.c

/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/ssl.h>
#include "internal/quic_record_rx.h"
#include "quic_record_shared.h"
#include "internal/common.h"
#include "internal/list.h"
#include "../ssl_local.h"

/*
 * Mark a packet in a bitfield.
 *
 * pkt_idx: index of packet within datagram.
 */
static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
{
    assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
    *bitf |= ((uint64_t)1) << pkt_idx;
}

/* Returns 1 if a packet is in the bitfield. */
static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
{
    assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
    return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
}
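/*
 * Example (illustrative): marking packet index 2 of a datagram sets bit 0x4,
 * so pkt_is_marked(&bitf, 2) subsequently returns 1. Because pkt_idx is
 * bounded by QUIC_MAX_PKT_PER_URXE (assumed to be <= 64, as implied by the
 * shift into a uint64_t), a single bitfield word suffices per datagram.
 */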
/*
 * RXE
 * ===
 *
 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
 * network. One RXE is used per received QUIC packet.
 */
typedef struct rxe_st RXE;

struct rxe_st {
    OSSL_QRX_PKT pkt;
    OSSL_LIST_MEMBER(rxe, RXE);
    size_t data_len, alloc_len, refcount;

    /* Extra fields for per-packet information. */
    QUIC_PKT_HDR hdr; /* data/len are decrypted payload */

    /* Decoded packet number. */
    QUIC_PN pn;

    /* Addresses copied from URXE. */
    BIO_ADDR peer, local;

    /* Time we received the packet (not when we processed it). */
    OSSL_TIME time;

    /* Total length of the datagram which contained this packet. */
    size_t datagram_len;

    /*
     * The key epoch the packet was received with. Always 0 for non-1-RTT
     * packets.
     */
    uint64_t key_epoch;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow
     * this structure.
     */
};

DEFINE_LIST_OF(rxe, RXE);
typedef OSSL_LIST(rxe) RXE_LIST;

static ossl_inline unsigned char *rxe_data(const RXE *e)
{
    return (unsigned char *)(e + 1);
}
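/*
 * Note (descriptive): an RXE and its payload buffer are carved from a single
 * allocation; rxe_data() simply addresses the alloc_len-byte region which
 * immediately follows the structure (see qrx_alloc_rxe() below).
 */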
/*
 * QRL
 * ===
 */
struct ossl_qrx_st {
    OSSL_LIB_CTX *libctx;
    const char *propq;

    /* Demux to receive datagrams from. */
    QUIC_DEMUX *demux;

    /* Length of connection IDs used in short-header packets in bytes. */
    size_t short_conn_id_len;

    /* Maximum number of deferred datagrams buffered at any one time. */
    size_t max_deferred;

    /* Current count of deferred datagrams. */
    size_t num_deferred;

    /*
     * List of URXEs which are filled with received encrypted data.
     * These are returned to the DEMUX's free list as they are processed.
     */
    QUIC_URXE_LIST urx_pending;

    /*
     * List of URXEs which we could not decrypt immediately and which are
     * being kept in case they can be decrypted later.
     */
    QUIC_URXE_LIST urx_deferred;

    /*
     * List of RXEs which are not currently in use. These are moved
     * to the pending list as they are filled.
     */
    RXE_LIST rx_free;

    /*
     * List of RXEs which are filled with decrypted packets ready to be passed
     * to the user. A RXE is removed from all lists inside the QRL when passed
     * to the user, then returned to the free list when the user returns it.
     */
    RXE_LIST rx_pending;

    /* Largest PN we have received and processed in a given PN space. */
    QUIC_PN largest_pn[QUIC_PN_SPACE_NUM];

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET el_set;

    /* Bytes we have received since this counter was last cleared. */
    uint64_t bytes_received;

    /*
     * Number of forged packets we have received since the QRX was
     * instantiated. Note that as per RFC 9001, this is connection-level
     * state; it is not per EL and is not reset by a key update.
     */
    uint64_t forged_pkt_count;

    /* The PN the current key epoch started at, inclusive. */
    uint64_t cur_epoch_start_pn;

    /* Validation callback. */
    ossl_qrx_late_validation_cb *validation_cb;
    void *validation_cb_arg;

    /* Key update callback. */
    ossl_qrx_key_update_cb *key_update_cb;
    void *key_update_cb_arg;

    /* Initial key phase. For debugging use only; always 0 in real use. */
    unsigned char init_key_phase_bit;

    /* Are we allowed to process 1-RTT packets yet? */
    unsigned char allow_1rtt;

    /* Message callback related arguments */
    ossl_msg_cb msg_callback;
    void *msg_callback_arg;
    SSL *msg_callback_ssl;
};

static void qrx_on_rx(QUIC_URXE *urxe, void *arg);

OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
{
    OSSL_QRX *qrx;
    size_t i;

    if (args->demux == NULL || args->max_deferred == 0)
        return 0;

    qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
    if (qrx == NULL)
        return 0;

    for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
        qrx->largest_pn[i] = args->init_largest_pn[i];

    qrx->libctx = args->libctx;
    qrx->propq = args->propq;
    qrx->demux = args->demux;
    qrx->short_conn_id_len = args->short_conn_id_len;
    qrx->init_key_phase_bit = args->init_key_phase_bit;
    qrx->max_deferred = args->max_deferred;
    return qrx;
}
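/*
 * Illustrative caller sketch (not part of this translation unit): a typical
 * user zero-initialises an OSSL_QRX_ARGS, fills in at least the demux and
 * max_deferred fields consumed above, creates the QRX, and then registers the
 * destination connection ID(s) it expects to receive:
 *
 *     OSSL_QRX_ARGS args = {0};
 *
 *     args.demux        = demux;
 *     args.max_deferred = 32;
 *     qrx = ossl_qrx_new(&args);
 *     if (qrx != NULL)
 *         ossl_qrx_add_dst_conn_id(qrx, &dcid);
 *
 * Keys are supplied later via ossl_qrx_provide_secret() and processed packets
 * are retrieved with ossl_qrx_read_pkt().
 */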
static void qrx_cleanup_rxl(RXE_LIST *l)
{
    RXE *e, *enext;

    for (e = ossl_list_rxe_head(l); e != NULL; e = enext) {
        enext = ossl_list_rxe_next(e);
        ossl_list_rxe_remove(l, e);
        OPENSSL_free(e);
    }
}

static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
{
    QUIC_URXE *e, *enext;

    for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
        enext = ossl_list_urxe_next(e);
        ossl_list_urxe_remove(l, e);
        ossl_quic_demux_release_urxe(qrx->demux, e);
    }
}

void ossl_qrx_free(OSSL_QRX *qrx)
{
    uint32_t i;

    if (qrx == NULL)
        return;

    /* Unregister from the RX DEMUX. */
    ossl_quic_demux_unregister_by_cb(qrx->demux, qrx_on_rx, qrx);

    /* Free RXE queue data. */
    qrx_cleanup_rxl(&qrx->rx_free);
    qrx_cleanup_rxl(&qrx->rx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_deferred);

    /* Drop keying material and crypto resources. */
    for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
        ossl_qrl_enc_level_set_discard(&qrx->el_set, i);

    OPENSSL_free(qrx);
}

void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
{
    /* Initialize our own fields inside the URXE and add to the pending list. */
    urxe->processed = 0;
    urxe->hpr_removed = 0;
    urxe->deferred = 0;
    ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM, urxe + 1,
                          urxe->data_len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);
}

static void qrx_on_rx(QUIC_URXE *urxe, void *arg)
{
    OSSL_QRX *qrx = arg;

    ossl_qrx_inject_urxe(qrx, urxe);
}

int ossl_qrx_add_dst_conn_id(OSSL_QRX *qrx,
                             const QUIC_CONN_ID *dst_conn_id)
{
    return ossl_quic_demux_register(qrx->demux,
                                    dst_conn_id,
                                    qrx_on_rx,
                                    qrx);
}

int ossl_qrx_remove_dst_conn_id(OSSL_QRX *qrx,
                                const QUIC_CONN_ID *dst_conn_id)
{
    return ossl_quic_demux_unregister(qrx->demux, dst_conn_id);
}

static void qrx_requeue_deferred(OSSL_QRX *qrx)
{
    QUIC_URXE *e;

    while ((e = ossl_list_urxe_head(&qrx->urx_deferred)) != NULL) {
        ossl_list_urxe_remove(&qrx->urx_deferred, e);
        ossl_list_urxe_insert_tail(&qrx->urx_pending, e);
    }
}

int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
                            uint32_t suite_id, EVP_MD *md,
                            const unsigned char *secret, size_t secret_len)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
                                               qrx->libctx,
                                               qrx->propq,
                                               enc_level,
                                               suite_id,
                                               md,
                                               secret,
                                               secret_len,
                                               qrx->init_key_phase_bit,
                                               /*is_tx=*/0))
        return 0;

    /*
     * Any packets we previously could not decrypt, we may now be able to
     * decrypt, so move any datagrams containing deferred packets from the
     * deferred to the pending queue.
     */
    qrx_requeue_deferred(qrx);
    return 1;
}

int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
    return 1;
}

/* Returns 1 if there are one or more pending RXEs. */
int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
{
    return !ossl_list_rxe_is_empty(&qrx->rx_pending);
}

/* Returns 1 if there are yet-unprocessed packets. */
int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
{
    return !ossl_list_urxe_is_empty(&qrx->urx_pending)
        || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
}

/* Pop the next pending RXE. Returns NULL if no RXE is pending. */
static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
{
    RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);

    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_remove(&qrx->rx_pending, rxe);
    return rxe;
}

/* Allocate a new RXE. */
static RXE *qrx_alloc_rxe(size_t alloc_len)
{
    RXE *rxe;

    if (alloc_len >= SIZE_MAX - sizeof(RXE))
        return NULL;

    rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_init_elem(rxe);
    rxe->alloc_len = alloc_len;
    rxe->data_len = 0;
    rxe->refcount = 0;
    return rxe;
}
/*
 * Ensures there is at least one RXE in the RX free list, allocating a new
 * entry if necessary. The returned RXE is in the RX free list; it is not
 * popped.
 *
 * alloc_len is a hint which may be used to determine the RXE size if
 * allocation is necessary. Returns NULL on allocation failure.
 */
static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
{
    RXE *rxe;

    if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
        return ossl_list_rxe_head(&qrx->rx_free);

    rxe = qrx_alloc_rxe(alloc_len);
    if (rxe == NULL)
        return NULL;

    ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
    return rxe;
}

/*
 * Resize the data buffer attached to an RXE to be n bytes in size. The address
 * of the RXE might change; the new address is returned, or NULL on failure, in
 * which case the original RXE remains valid.
 */
static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
{
    RXE *rxe2, *p;

    /* Should never happen. */
    if (rxe == NULL)
        return NULL;

    if (n >= SIZE_MAX - sizeof(RXE))
        return NULL;

    /* Remove the item from the list to avoid accessing freed memory */
    p = ossl_list_rxe_prev(rxe);
    ossl_list_rxe_remove(rxl, rxe);

    /* Should never resize an RXE which has been handed out. */
    if (!ossl_assert(rxe->refcount == 0))
        return NULL;

    /*
     * NOTE: We do not clear old memory, although it does contain decrypted
     * data.
     */
    rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
    if (rxe2 == NULL) {
        /* Resize failed, restore old allocation. */
        if (p == NULL)
            ossl_list_rxe_insert_head(rxl, rxe);
        else
            ossl_list_rxe_insert_after(rxl, p, rxe);
        return NULL;
    }

    if (p == NULL)
        ossl_list_rxe_insert_head(rxl, rxe2);
    else
        ossl_list_rxe_insert_after(rxl, p, rxe2);

    rxe2->alloc_len = n;
    return rxe2;
}

/*
 * Ensure the data buffer attached to an RXE is at least n bytes in size.
 * Returns NULL on failure.
 */
static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
                            RXE *rxe, size_t n)
{
    if (rxe->alloc_len >= n)
        return rxe;

    return qrx_resize_rxe(rxl, rxe, n);
}

/* Return a RXE handed out to the user back to our freelist. */
static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
{
    /* RXE should not be in any list */
    assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
    rxe->pkt.hdr = NULL;
    rxe->pkt.peer = NULL;
    rxe->pkt.local = NULL;
    ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
}

/*
 * Given a pointer to a pointer pointing to a buffer and the size of that
 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
 * pointer may change due to realloc). *pi is the offset in bytes to copy the
 * buffer to, and on success is updated to be the offset pointing after the
 * copied buffer. *pptr is updated to point to the new location of the buffer.
 */
static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
                               const unsigned char **pptr, size_t buf_len)
{
    RXE *rxe;
    unsigned char *dst;

    if (!buf_len)
        return 1;

    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
        return 0;

    *prxe = rxe;
    dst = (unsigned char *)rxe_data(rxe) + *pi;
    memcpy(dst, *pptr, buf_len);

    *pi += buf_len;
    *pptr = dst;
    return 1;
}
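/*
 * Note (descriptive): qrx_relocate_buffer() is used below to copy fields which
 * currently point into URXE memory (for example the Initial packet token) into
 * the RXE's own buffer, so that the URXE can be recycled while the RXE is
 * still held by the user.
 */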
static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
{
    switch (hdr->type) {
    case QUIC_PKT_TYPE_INITIAL:
        return QUIC_ENC_LEVEL_INITIAL;
    case QUIC_PKT_TYPE_HANDSHAKE:
        return QUIC_ENC_LEVEL_HANDSHAKE;
    case QUIC_PKT_TYPE_0RTT:
        return QUIC_ENC_LEVEL_0RTT;
    case QUIC_PKT_TYPE_1RTT:
        return QUIC_ENC_LEVEL_1RTT;

    default:
        assert(0);
    case QUIC_PKT_TYPE_RETRY:
    case QUIC_PKT_TYPE_VERSION_NEG:
        return QUIC_ENC_LEVEL_INITIAL; /* not used */
    }
}

static uint32_t rxe_determine_pn_space(RXE *rxe)
{
    uint32_t enc_level;

    enc_level = qrx_determine_enc_level(&rxe->hdr);
    return ossl_quic_enc_level_to_pn_space(enc_level);
}

static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
                                  const QUIC_CONN_ID *first_dcid)
{
    /* Ensure version is what we want. */
    if (rxe->hdr.version != QUIC_VERSION_1
        && rxe->hdr.version != QUIC_VERSION_NONE)
        return 0;

    /* Clients should never receive 0-RTT packets. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
        return 0;

    /* Version negotiation and retry packets must be the first packet. */
    if (first_dcid != NULL && !ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
        return 0;

    /*
     * If this is not the first packet in a datagram, the destination
     * connection ID must match the one in that packet.
     */
    if (first_dcid != NULL) {
        if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN)
            || !ossl_quic_conn_id_eq(first_dcid,
                                     &rxe->hdr.dst_conn_id))
            return 0;
    }

    return 1;
}

/* Validate header and decode PN. */
static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
{
    int pn_space = rxe_determine_pn_space(rxe);

    if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
                                          qrx->largest_pn[pn_space],
                                          &rxe->pn))
        return 0;

    return 1;
}

/* Late packet header validation. */
static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
{
    int pn_space = rxe_determine_pn_space(rxe);

    /*
     * Allow our user to decide whether to discard the packet before we try
     * and decrypt it.
     */
    if (qrx->validation_cb != NULL
        && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
        return 0;

    return 1;
}

/*
 * Retrieves the correct cipher context for an EL and key phase. Writes the key
 * epoch number actually used for packet decryption to *rx_key_epoch.
 */
static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
                                     uint32_t enc_level,
                                     unsigned char key_phase_bit,
                                     uint64_t *rx_key_epoch,
                                     int *is_old_key)
{
    size_t idx;

    *is_old_key = 0;

    if (enc_level != QUIC_ENC_LEVEL_1RTT) {
        *rx_key_epoch = 0;
        return 0;
    }

    if (!ossl_assert(key_phase_bit <= 1))
        return SIZE_MAX;

    /*
     * RFC 9001 requires that we not create timing channels which could reveal
     * the decrypted value of the Key Phase bit. We usually handle this by
     * keeping the cipher contexts for both the current and next key epochs
     * around, so that we just select a cipher context blindly using the key
     * phase bit, which is time-invariant.
     *
     * In the COOLDOWN state, we only have one keyslot/cipher context. RFC
     * 9001 suggests an implementation strategy to avoid creating a timing
     * channel in this case:
     *
     *   Endpoints can use randomized packet protection keys in place of
     *   discarded keys when key updates are not yet permitted.
     *
     * Rather than use a randomised key, we simply use our existing key as it
     * will fail AEAD verification anyway. This avoids the need to keep around
     * a dedicated garbage key.
     *
     * Note: Accessing different cipher contexts is technically not
     * timing-channel safe due to microarchitectural side channels, but this
     * is the best we can reasonably do and appears to be directly suggested
     * by the RFC.
     */
    idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
                                                   : key_phase_bit);

    /*
     * We also need to determine the key epoch number which this index
     * corresponds to. This is so we can report the key epoch number in the
     * OSSL_QRX_PKT structure, which callers need to validate whether it was
     * OK for a packet to be sent using a given key epoch's keys.
     */
    switch (el->state) {
    case QRL_EL_STATE_PROV_NORMAL:
        /*
         * If we are in the NORMAL state, usually the KP bit will match the
         * LSB of our key epoch, meaning no new key update is being signalled.
         * If it does not match, this means the packet (purports to) belong to
         * the next key epoch.
         *
         * IMPORTANT: The AEAD tag has not been verified yet when this
         * function is called, so this code must be timing-channel safe, hence
         * use of XOR. Moreover, the value output below is not yet
         * authenticated.
         */
        *rx_key_epoch
            = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
        break;

    case QRL_EL_STATE_PROV_UPDATING:
        /*
         * If we are in the UPDATING state, usually the KP bit will match the
         * LSB of our key epoch. If it does not match, this means that the
         * packet (purports to) belong to the previous key epoch.
         *
         * As above, must be timing-channel safe.
         */
        *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
        *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
        break;

    case QRL_EL_STATE_PROV_COOLDOWN:
        /*
         * If we are in COOLDOWN, there is only one key epoch we can possibly
         * decrypt with, so just try that. If AEAD decryption fails, the value
         * we output here isn't used anyway.
         */
        *rx_key_epoch = el->key_epoch;
        break;
    }

    return idx;
}
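/*
 * Worked example (illustrative): suppose el->key_epoch == 3 in the NORMAL
 * state. The LSB of the epoch is 1, so a packet carrying key_phase_bit == 1
 * yields rx_key_epoch = 3 + (1 ^ 1) = 3 (no key update signalled), while
 * key_phase_bit == 0 yields rx_key_epoch = 3 + (1 ^ 0) = 4, i.e. the packet
 * purports to belong to the next epoch. In the UPDATING state with
 * key_epoch == 4, key_phase_bit == 1 gives is_old_key = 1 and
 * rx_key_epoch = 3, the previous epoch. None of these outputs is
 * authenticated until the AEAD tag has been verified by the caller.
 */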
/*
 * Tries to decrypt a packet payload.
 *
 * Returns 1 on success or 0 on failure (which is permanent). The payload is
 * decrypted from src and written to dst. The buffer dst must be of at least
 * src_len bytes in length. The actual length of the output in bytes is written
 * to *dec_len on success, which will always be equal to or less than (usually
 * less than) src_len.
 */
static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
                                const unsigned char *src,
                                size_t src_len, size_t *dec_len,
                                const unsigned char *aad, size_t aad_len,
                                QUIC_PN pn, uint32_t enc_level,
                                unsigned char key_phase_bit,
                                uint64_t *rx_key_epoch)
{
    int l = 0, l2 = 0, is_old_key, nonce_len;
    unsigned char nonce[EVP_MAX_IV_LENGTH];
    size_t i, cctx_idx;
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);
    EVP_CIPHER_CTX *cctx;

    if (src_len > INT_MAX || aad_len > INT_MAX)
        return 0;

    /* We should not have been called if we do not have key material. */
    if (!ossl_assert(el != NULL))
        return 0;

    if (el->tag_len >= src_len)
        return 0;

    /*
     * If we have failed to authenticate a certain number of ciphertexts,
     * refuse to decrypt any more ciphertexts.
     */
    if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
        return 0;

    cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
                                      rx_key_epoch, &is_old_key);
    if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
        return 0;

    if (is_old_key && pn >= qrx->cur_epoch_start_pn)
        /*
         * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet
         * with a given PN, it MUST discard all packets in the same PN space
         * with higher PNs if they cannot be successfully unprotected with the
         * same key, or -- if there is a key update -- a subsequent packet
         * protection key.
         *
         * In other words, once a PN x triggers a KU, it is invalid for us to
         * receive a packet with a newer PN y (y > x) using the old keys.
         */
        return 0;

    cctx = el->cctx[cctx_idx];

    /* Construct nonce (nonce=IV ^ PN). */
    nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
    if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
        return 0;

    memcpy(nonce, el->iv[cctx_idx], nonce_len);
    for (i = 0; i < sizeof(QUIC_PN); ++i)
        nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
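    /*
     * Example (illustrative): with a 12-byte IV and pn == 0x1234, only the
     * last two nonce bytes change: nonce[10] ^= 0x12 and nonce[11] ^= 0x34.
     * The packet number is applied big-endian to the final eight bytes of the
     * IV, matching the nonce construction described in RFC 9001 s. 5.3.
     */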
    /* type and key will already have been setup; feed the IV. */
    if (EVP_CipherInit_ex(cctx, NULL,
                          NULL, NULL, nonce, /*enc=*/0) != 1)
        return 0;

    /* Feed the AEAD tag we got so the cipher can validate it. */
    if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
                            el->tag_len,
                            (unsigned char *)src + src_len - el->tag_len) != 1)
        return 0;

    /* Feed AAD data. */
    if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
        return 0;

    /* Feed encrypted packet body. */
    if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
        return 0;

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    /*
     * Throw away what we just decrypted and just use the ciphertext instead
     * (which should be unencrypted)
     */
    memcpy(dst, src, l);

    /* Pretend to authenticate the tag but ignore it */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* We don't care */
    }
#else
    /* Ensure authentication succeeded. */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* Authentication failed, increment failed auth counter. */
        ++qrx->forged_pkt_count;
        return 0;
    }
#endif

    *dec_len = l;
    return 1;
}

static ossl_inline void ignore_res(int x)
{
    /* No-op. */
}

static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
{
    if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
        /* We are already in RXKU, so we don't call the callback again. */
        return;

    qrx->cur_epoch_start_pn = pn;

    if (qrx->key_update_cb != NULL)
        qrx->key_update_cb(pn, qrx->key_update_cb_arg);
}
/* Process a single packet in a datagram. */
static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
                           PACKET *pkt, size_t pkt_idx,
                           QUIC_CONN_ID *first_dcid,
                           size_t datagram_len)
{
    RXE *rxe;
    const unsigned char *eop = NULL;
    size_t i, aad_len = 0, dec_len = 0;
    PACKET orig_pkt = *pkt;
    const unsigned char *sop = PACKET_data(pkt);
    unsigned char *dst;
    char need_second_decode = 0, already_processed = 0;
    QUIC_PKT_HDR_PTRS ptrs;
    uint32_t pn_space, enc_level;
    OSSL_QRL_ENC_LEVEL *el = NULL;
    uint64_t rx_key_epoch = UINT64_MAX;

    /*
     * Get a free RXE. If we need to allocate a new one, use the packet length
     * as a good ballpark figure.
     */
    rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
    if (rxe == NULL)
        return 0;

    /* Have we already processed this packet? */
    if (pkt_is_marked(&urxe->processed, pkt_idx))
        already_processed = 1;

    /*
     * Decode the header into the RXE structure. We first decrypt and read the
     * unprotected part of the packet header (unless we already removed header
     * protection, in which case we decode all of it).
     */
    need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
    if (!ossl_quic_wire_decode_pkt_hdr(pkt,
                                       qrx->short_conn_id_len,
                                       need_second_decode, 0, &rxe->hdr, &ptrs))
        goto malformed;

    /*
     * Our successful decode above included an intelligible length and the
     * PACKET is now pointing to the end of the QUIC packet.
     */
    eop = PACKET_data(pkt);

    /*
     * Make a note of the first packet's DCID so we can later ensure the
     * destination connection IDs of all packets in a datagram match.
     */
    if (pkt_idx == 0)
        *first_dcid = rxe->hdr.dst_conn_id;

    /*
     * Early header validation. Since we now know the packet length, we can
     * also now skip over it if we already processed it.
     */
    if (already_processed
        || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
        /*
         * Already processed packets are handled identically to malformed
         * packets; i.e., they are ignored.
         */
        goto malformed;

    if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
        /*
         * Version negotiation and retry packets are a special case. They do
         * not contain a payload which needs decrypting and have no header
         * protection.
         */

        /* Just copy the payload from the URXE to the RXE. */
        if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
            /*
             * Allocation failure. EOP will be pointing to the end of the
             * datagram so processing of this datagram will end here.
             */
            goto malformed;

        /* We are now committed to returning the packet. */
        memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
        pkt_mark(&urxe->processed, pkt_idx);

        rxe->hdr.data = rxe_data(rxe);
        rxe->pn = QUIC_PN_INVALID;

        rxe->data_len = rxe->hdr.len;
        rxe->datagram_len = datagram_len;
        rxe->key_epoch = 0;
        rxe->peer = urxe->peer;
        rxe->local = urxe->local;
        rxe->time = urxe->time;

        /* Move RXE to pending. */
        ossl_list_rxe_remove(&qrx->rx_free, rxe);
        ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
        return 0; /* success, did not defer */
    }

    /* Determine encryption level of packet. */
    enc_level = qrx_determine_enc_level(&rxe->hdr);

    /* If we do not have keying material for this encryption level yet, defer. */
    switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
    case 1:
        /* We have keys. */
        if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
            /*
             * But we cannot process 1-RTT packets until the handshake is
             * completed (RFC 9000 s. 5.7).
             */
            goto cannot_decrypt;
        break;

    case 0:
        /* No keys yet. */
        goto cannot_decrypt;

    default:
        /* We already discarded keys for this EL, we will never process this. */
        goto malformed;
    }

    /*
     * We will copy any token included in the packet to the start of our RXE
     * data buffer (so that we don't reference the URXE buffer any more and
     * can recycle it). Track our position in the RXE buffer by index instead
     * of pointer as the pointer may change as reallocs occur.
     */
    i = 0;

    /*
     * rxe->hdr.data is now pointing at the (encrypted) packet payload.
     * rxe->hdr also has fields pointing into the PACKET buffer which will be
     * going away soon (the URXE will be reused for another incoming packet).
     *
     * Firstly, relocate some of these fields into the RXE as needed.
     *
     * Relocate token buffer and fix pointer.
     */
    if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
        const unsigned char *token = rxe->hdr.token;

        /*
         * This may change the value of rxe and change the value of the token
         * pointer as well. So we must make a temporary copy of the pointer to
         * the token, and then copy it back into the new location of the rxe.
         */
        if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
            goto malformed;

        rxe->hdr.token = token;
    }

    /* Now remove header protection. */
    *pkt = orig_pkt;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
    assert(el != NULL); /* Already checked above */

    if (need_second_decode) {
        if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
            goto malformed;

        /*
         * We have removed header protection, so don't attempt to do it again
         * if the packet gets deferred and processed again.
         */
        pkt_mark(&urxe->hpr_removed, pkt_idx);

        /* Decode the now unprotected header. */
        if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
                                          0, 0, &rxe->hdr, NULL) != 1)
            goto malformed;
    }

    /* Validate header and decode PN. */
    if (!qrx_validate_hdr(qrx, rxe))
        goto malformed;

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
                          eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);

    /*
     * The AAD data is the entire (unprotected) packet header including the
     * PN. The packet header has been unprotected in place, so we can just
     * reuse the PACKET buffer. The header ends where the payload begins.
     */
    aad_len = rxe->hdr.data - sop;

    /* Ensure the RXE buffer size is adequate for our payload. */
    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
        /*
         * Allocation failure, treat as malformed and do not bother processing
         * any further packets in the datagram as they are likely to also
         * encounter allocation failures.
         */
        eop = NULL;
        goto malformed;
    }

    /*
     * We decrypt the packet body to immediately after the token at the start
     * of the RXE buffer (where present).
     *
     * Do the decryption from the PACKET (which points into URXE memory) to
     * our RXE payload (single-copy decryption), then fixup the pointers in
     * the header to point to our new buffer.
     *
     * If decryption fails this is considered a permanent error; we defer
     * packets we don't yet have decryption keys for above, so if this fails,
     * something has gone wrong with the handshake process or a packet has
     * been corrupted.
     */
    dst = (unsigned char *)rxe_data(rxe) + i;
    if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
                              &dec_len, sop, aad_len, rxe->pn, enc_level,
                              rxe->hdr.key_phase, &rx_key_epoch))
        goto malformed;

    /*
     * ---------------------------------------------------------
     *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
     *              AND MUST BE TIMING-CHANNEL SAFE.
     * ---------------------------------------------------------
     *
     * At this point, we have successfully authenticated the AEAD tag and no
     * longer need to worry about exposing the PN, PN length or Key Phase bit
     * in timing channels. Invoke any configured validation callback to allow
     * for rejection of duplicate PNs.
     */
    if (!qrx_validate_hdr_late(qrx, rxe))
        goto malformed;

    /* Check for a Key Phase bit differing from our expectation. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
        && rxe->hdr.key_phase != (el->key_epoch & 1))
        qrx_key_update_initiated(qrx, rxe->pn);

    /*
     * We have now successfully decrypted the packet payload. If there are
     * additional packets in the datagram, it is possible we will fail to
     * decrypt them and need to defer them until we have some key material we
     * don't currently possess. If this happens, the URXE will be moved to the
     * deferred queue. Since a URXE corresponds to one datagram, which may
     * contain multiple packets, we must ensure any packets we have already
     * processed in the URXE are not processed again (this is an RFC
     * requirement). We do this by marking the nth packet in the datagram as
     * processed.
     *
     * We are now committed to returning this decrypted packet to the user,
     * meaning we now consider the packet processed and must mark it
     * accordingly.
     */
    pkt_mark(&urxe->processed, pkt_idx);

    /*
     * Update header to point to the decrypted buffer, which may be shorter
     * due to AEAD tags, block padding, etc.
     */
    rxe->hdr.data = dst;
    rxe->hdr.len = dec_len;
    rxe->data_len = dec_len;
    rxe->datagram_len = datagram_len;
    rxe->key_epoch = rx_key_epoch;

    /* We processed the PN successfully, so update largest processed PN. */
    pn_space = rxe_determine_pn_space(rxe);
    if (rxe->pn > qrx->largest_pn[pn_space])
        qrx->largest_pn[pn_space] = rxe->pn;

    /* Copy across network addresses and RX time from URXE to RXE. */
    rxe->peer = urxe->peer;
    rxe->local = urxe->local;
    rxe->time = urxe->time;

    /* Move RXE to pending. */
    ossl_list_rxe_remove(&qrx->rx_free, rxe);
    ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
    return 0; /* success, did not defer; not distinguished from failure */

cannot_decrypt:
    /*
     * We cannot process this packet right now (but might be able to later).
     * We MUST attempt to process any other packets in the datagram, so defer
     * it and skip over it.
     */
    assert(eop != NULL && eop >= PACKET_data(pkt));
    /*
     * We don't care if this fails as it will just result in the packet being
     * at the end of the datagram buffer.
     */
    ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    return 1; /* deferred */

malformed:
    if (eop != NULL) {
        /*
         * This packet cannot be processed and will never be processable. We
         * were at least able to decode its header and determine its length,
         * so we can skip over it and try to process any subsequent packets in
         * the datagram.
         *
         * Mark as processed as an optimization.
         */
        assert(eop >= PACKET_data(pkt));
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    } else {
        /*
         * This packet cannot be processed and will never be processable.
         * Because even its header is not intelligible, we cannot examine any
         * further packets in the datagram because its length cannot be
         * discerned.
         *
         * Advance over the entire remainder of the datagram, and mark it as
         * processed as an optimization.
         */
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
    }

    return 0; /* failure, did not defer; not distinguished from success */
}
/* Process a datagram which was received. */
static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
                                const unsigned char *data,
                                size_t data_len)
{
    int have_deferred = 0;
    PACKET pkt;
    size_t pkt_idx = 0;
    QUIC_CONN_ID first_dcid = { 255 };

    qrx->bytes_received += data_len;

    if (!PACKET_buf_init(&pkt, data, data_len))
        return 0;

    for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
        /*
         * A packet smaller than the minimum possible QUIC packet size is not
         * considered valid. We also ignore more than a certain number of
         * packets within the same datagram.
         */
        if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
            || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
            break;

        /*
         * We note whether packet processing resulted in a deferral since
         * this means we need to move the URXE to the deferred list rather
         * than the free list after we're finished dealing with it for now.
         *
         * However, we don't otherwise care here whether processing succeeded
         * or failed, as the RFC says even if a packet in a datagram is
         * malformed, we should still try to process any packets following it.
         *
         * In the case where the packet is so malformed we can't determine its
         * length, qrx_process_pkt will take care of advancing to the end of
         * the packet, so we will exit the loop automatically in this case.
         */
        if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
            have_deferred = 1;
    }

    /* Only report whether there were any deferrals. */
    return have_deferred;
}

/* Process a single pending URXE. */
static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
{
    int was_deferred;

    /* The next URXE we process should be at the head of the pending list. */
    if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
        return 0;

    /*
     * Attempt to process the datagram. The return value indicates only if
     * processing of the datagram was deferred. If we failed to process the
     * datagram, we do not attempt to process it again and silently eat the
     * error.
     */
    was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
                                        e->data_len);

    /*
     * Remove the URXE from the pending list and return it to either the free
     * or deferred list.
     */
    ossl_list_urxe_remove(&qrx->urx_pending, e);
    if (was_deferred > 0 &&
            (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
        ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
        if (!e->deferred) {
            e->deferred = 1;
            ++qrx->num_deferred;
        }
    } else {
        if (e->deferred) {
            e->deferred = 0;
            --qrx->num_deferred;
        }
        ossl_quic_demux_release_urxe(qrx->demux, e);
    }

    return 1;
}

/* Process any pending URXEs to generate pending RXEs. */
static int qrx_process_pending_urxl(OSSL_QRX *qrx)
{
    QUIC_URXE *e;

    while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
        if (!qrx_process_one_urxe(qrx, e))
            return 0;

    return 1;
}

int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
{
    RXE *rxe;

    if (!ossl_qrx_processed_read_pending(qrx)) {
        if (!qrx_process_pending_urxl(qrx))
            return 0;

        if (!ossl_qrx_processed_read_pending(qrx))
            return 0;
    }

    rxe = qrx_pop_pending_rxe(qrx);
    if (!ossl_assert(rxe != NULL))
        return 0;

    assert(rxe->refcount == 0);
    rxe->refcount = 1;

    rxe->pkt.hdr = &rxe->hdr;
    rxe->pkt.pn = rxe->pn;
    rxe->pkt.time = rxe->time;
    rxe->pkt.datagram_len = rxe->datagram_len;
    rxe->pkt.peer
        = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
    rxe->pkt.local
        = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
    rxe->pkt.key_epoch = rxe->key_epoch;
    rxe->pkt.qrx = qrx;
    *ppkt = &rxe->pkt;

    return 1;
}
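/*
 * Illustrative usage (not part of this translation unit): callers typically
 * drain the QRX in a loop and release each packet when they are done with it:
 *
 *     OSSL_QRX_PKT *qpkt;
 *
 *     while (ossl_qrx_read_pkt(qrx, &qpkt)) {
 *         handle(qpkt->hdr, qpkt->pn);   // hypothetical handler
 *         ossl_qrx_pkt_release(qpkt);
 *     }
 *
 * ossl_qrx_pkt_up_ref() may be used to keep a packet alive beyond the loop.
 */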
void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
{
    RXE *rxe;

    if (pkt == NULL)
        return;

    rxe = (RXE *)pkt;
    assert(rxe->refcount > 0);
    if (--rxe->refcount == 0)
        qrx_recycle_rxe(pkt->qrx, rxe);
}

void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
{
    RXE *rxe = (RXE *)pkt;

    assert(rxe->refcount > 0);
    ++rxe->refcount;
}
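/*
 * Note (descriptive): ossl_qrx_read_pkt() hands out each packet with a
 * reference count of 1. The cast from OSSL_QRX_PKT * back to RXE * above
 * relies on the pkt field being the first member of struct rxe_st. When the
 * count drops to zero the RXE is returned to the QRX free list and may be
 * reused for a subsequent packet, so the header and payload pointers must not
 * be used after the final release.
 */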
uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
{
    uint64_t v = qrx->bytes_received;

    if (clear)
        qrx->bytes_received = 0;

    return v;
}

int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
                                    ossl_qrx_late_validation_cb *cb,
                                    void *cb_arg)
{
    qrx->validation_cb = cb;
    qrx->validation_cb_arg = cb_arg;
    return 1;
}

int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
                               ossl_qrx_key_update_cb *cb,
                               void *cb_arg)
{
    qrx->key_update_cb = cb;
    qrx->key_update_cb_arg = cb_arg;
    return 1;
}

uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        QUIC_ENC_LEVEL_1RTT, 1);

    return el == NULL ? UINT64_MAX : el->key_epoch;
}

int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        QUIC_ENC_LEVEL_1RTT, 1);

    if (el == NULL)
        return 0;

    if (el->state == QRL_EL_STATE_PROV_UPDATING
        && !ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
                                                   QUIC_ENC_LEVEL_1RTT))
        return 0;

    if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN
        && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
                                                     QUIC_ENC_LEVEL_1RTT))
        return 0;

    return 1;
}
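/*
 * Note (descriptive, based on the state names used above): the function above
 * drives the 1-RTT RX key update state machine forwards. An EL in the
 * UPDATING state is advanced via ossl_qrl_enc_level_set_key_update_done(),
 * and, if 'normal' is nonzero, an EL then in the COOLDOWN state is returned
 * to normal operation via ossl_qrl_enc_level_set_key_cooldown_done(). It
 * returns 1 if the requested transitions succeeded (or were not needed).
 */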
uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
{
    return qrx->forged_pkt_count;
}

uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
                                           uint32_t enc_level)
{
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);

    return el == NULL ? UINT64_MAX
        : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
}

void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
{
    if (qrx->allow_1rtt)
        return;

    qrx->allow_1rtt = 1;
    qrx_requeue_deferred(qrx);
}

void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
                               SSL *msg_callback_ssl)
{
    qrx->msg_callback = msg_callback;
    qrx->msg_callback_ssl = msg_callback_ssl;
}

void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
{
    qrx->msg_callback_arg = msg_callback_arg;
}