/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

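/* A bufq is a FIFO of fixed-size chunks. Each chunk keeps a read and a
 * write offset into its data array: writes append at the tail chunk,
 * reads consume from the head chunk. Chunks that have been fully read
 * are recycled via the queue's spare list, an optional shared bufc_pool,
 * or freed, depending on the options given at init time. */
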
static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static size_t chunk_space(const struct buf_chunk *chunk)
{
  return chunk->dlen - chunk->w_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}

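/* Copy at most `len` bytes from `buf` into the free space at the end of
 * the chunk. Returns the number of bytes copied, 0 if the chunk is full. */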
static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}

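/* Copy up to `len` unread bytes from the chunk into `buf`. When the chunk
 * is drained completely, its offsets are reset so the space can be
 * written again. Returns the number of bytes copied. */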
static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}

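/* Let `reader` fill the chunk's free space directly, at most `max_len`
 * bytes (0 means no limit beyond the free space). Advances the write
 * offset on success and returns the reader's result. */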
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
    if(chunk->r_offset == chunk->w_offset)
      chunk->r_offset = chunk->w_offset = 0;
  }
  return n;
}

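/* Move any unread bytes to the front of the chunk so that the space
 * behind the write offset becomes available for appending again. */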
static void chunk_shift(struct buf_chunk *chunk)
{
  if(chunk->r_offset) {
    if(!chunk_is_empty(chunk)) {
      size_t n = chunk->w_offset - chunk->r_offset;
      memmove(chunk->x.data, chunk->x.data + chunk->r_offset, n);
      chunk->w_offset -= chunk->r_offset;
      chunk->r_offset = 0;
    }
    else {
      chunk->r_offset = chunk->w_offset = 0;
    }
  }
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}

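/* A bufc_pool hands out chunks of one fixed size and keeps up to
 * `spare_max` reset chunks around for reuse, so that queues sharing the
 * pool do not have to allocate and free chunks all the time. */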
void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}

static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;
  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }
  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

static void bufcp_put(struct bufc_pool *pool,
                      struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

size_t Curl_bufq_space(const struct bufq *q)
{
  size_t space = 0;
  if(q->tail)
    space += chunk_space(q->tail);
  if(q->spare) {
    struct buf_chunk *chunk = q->spare;
    while(chunk) {
      space += chunk->dlen;
      chunk = chunk->next;
    }
  }
  if(q->chunk_count < q->max_chunks) {
    space += (q->max_chunks - q->chunk_count) * q->chunk_size;
  }
  return space;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}

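/* Get a writable chunk: reuse one from the queue's spare list if there is
 * one, otherwise take one from the pool or allocate a fresh one. Respects
 * `max_chunks` unless BUFQ_OPT_SOFT_LIMIT is set. */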
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;
  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }
  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;
  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}

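/* Remove fully read chunks from the head of the queue and recycle them:
 * back to the pool, onto the spare list, or free()d, depending on the
 * queue's configuration. */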
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;
  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}

ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;
  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if(q->chunk_count < q->max_chunks) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;
  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

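/* Illustrative use of the write/read pair above (a sketch with made-up
 * sizes, not code taken from curl itself):
 *
 *   struct bufq q;
 *   unsigned char out[128];
 *   CURLcode result;
 *   ssize_t n;
 *
 *   Curl_bufq_init(&q, 1024, 4);    (up to 4 chunks of 1024 bytes each)
 *   n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &result);
 *   n = Curl_bufq_read(&q, out, sizeof(out), &result);
 *   Curl_bufq_free(&q);
 *
 * A -1 return with CURLE_AGAIN means the queue was full (write) or
 * empty (read). */
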
bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;
  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;
  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}

void Curl_bufq_skip_and_shift(struct bufq *q, size_t amount)
{
  Curl_bufq_skip(q, amount);
  if(q->tail)
    chunk_shift(q->tail);
}

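/* Pass as much buffered data as possible to `writer` and skip what it
 * accepted. Returns the number of bytes passed on, or -1 if nothing could
 * be passed at all or a real error (not CURLE_AGAIN) occurred. */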
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;
  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;
    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}

ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;
  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }
    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of the write adding 0 bytes (and len is > 0):
     * break, or we might enter an infinite loop here */
    if(n == 0)
      break;
    /* Maybe only part of `buf` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }
  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

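/* Take a single "sip": invoke `reader` once to fill the current tail
 * chunk, adding at most `max_len` bytes (0 means no limit). Returns the
 * reader's result; 0 means EOF, -1 with CURLE_AGAIN means the queue is
 * full or the reader would block. */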
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;
  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }
  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total number of bytes read (may be 0) or -1 on reader
 * errors.
 * Note that even in the case of a -1 return, chunks may have been read
 * and the buffer queue may have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;
  *err = CURLE_AGAIN;
  while(1) {
    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get fewer bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}