/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
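
/* A bufq is a FIFO of `struct buf_chunk` blocks. Each chunk carries a
 * read offset (r_offset) and a write offset (w_offset) into its `dlen`
 * bytes of storage; buffered data lives between the two offsets. Chunks
 * come either from a shared `bufc_pool` or from the heap, and drained
 * chunks may be kept on a spare list for reuse.
 *
 * Illustrative use, with hypothetical buffers `in`/`out` and a queue of
 * four 1024-byte chunks (a sketch, not taken from the curl sources):
 *
 *   struct bufq q;
 *   CURLcode result;
 *   Curl_bufq_init(&q, 1024, 4);
 *   (void)Curl_bufq_write(&q, in, in_len, &result);
 *   (void)Curl_bufq_read(&q, out, out_len, &result);
 *   Curl_bufq_free(&q);
 */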
static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static size_t chunk_space(const struct buf_chunk *chunk)
{
  return chunk->dlen - chunk->w_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}
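
/* Copy up to `len` bytes from `buf` into the chunk's free tail space.
 * Returns the number of bytes copied, 0 when the chunk is full. */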
static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}
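
/* Copy up to `len` bytes from the chunk into `buf`. When the chunk is
 * fully drained, its offsets are reset so the space can be reused.
 * Returns the number of bytes copied. */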
static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}
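
/* Let `reader` fill the chunk's free space directly, limited to `max_len`
 * bytes when that is non-zero. On success the write offset is advanced by
 * the amount read. Returns the reader's result, or -1 with *err set to
 * CURLE_AGAIN when the chunk has no free space. */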
static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;

  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
    if(chunk->r_offset == chunk->w_offset)
      chunk->r_offset = chunk->w_offset = 0;
  }
  return n;
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}

void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}
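
/* Take a chunk from the pool: reuse a spare if one is available,
 * otherwise allocate a new chunk of `pool->chunk_size` bytes. */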
static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;

  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }

  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

static void bufcp_put(struct bufc_pool *pool,
                      struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

size_t Curl_bufq_space(const struct bufq *q)
{
  size_t space = 0;
  if(q->tail)
    space += chunk_space(q->tail);
  if(q->spare) {
    struct buf_chunk *chunk = q->spare;
    while(chunk) {
      space += chunk->dlen;
      chunk = chunk->next;
    }
  }
  if(q->chunk_count < q->max_chunks) {
    space += (q->max_chunks - q->chunk_count) * q->chunk_size;
  }
  return space;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}
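
/* Get a writable chunk for the queue: reuse a spare if present, otherwise
 * allocate a new one (from the pool or the heap), unless the queue is at
 * its chunk limit and BUFQ_OPT_SOFT_LIMIT is not set. */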
static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;

  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }

  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;

  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}
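
/* Remove fully consumed chunks from the head of the queue. Depending on
 * configuration, pruned chunks go back to the pool, onto the spare list,
 * or are freed. */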
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;

  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
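
/* Return the tail chunk if it still has room, otherwise try to append a
 * fresh chunk and make it the new tail. Returns NULL if no more chunks
 * can be added. */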
static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;

  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}
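
/* Append `len` bytes from `buf` to the queue, adding chunks as needed.
 * Returns the number of bytes written (which may be less than `len`), or
 * -1 with *err set to CURLE_AGAIN when the queue is full, or to
 * CURLE_OUT_OF_MEMORY when a chunk allocation fails. */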
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;

  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

CURLcode Curl_bufq_cwrite(struct bufq *q,
                          const char *buf, size_t len,
                          size_t *pnwritten)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
  *pnwritten = (n < 0)? 0 : (size_t)n;
  return result;
}

ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;

  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
                         size_t *pnread)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
  *pnread = (n < 0)? 0 : (size_t)n;
  return result;
}

bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;

  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;

  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}
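
/* Pass as much buffered data as possible to `writer`, skipping what it
 * accepts. Returns the total amount passed on, or -1 with *err set
 * (CURLE_AGAIN when the writer blocked before anything could be
 * written). */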
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;

  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;

    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}
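
/* Add `buf` to the queue; when the queue is full, first try to flush it
 * via Curl_bufq_pass() to make room. Returns the number of bytes from
 * `buf` that were added to the queue, or -1 with *err set. */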
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;

  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }

    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0)
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;

    /* Maybe only part of `data` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }
  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
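
/* Have `reader` add data to the queue, writing into a single chunk only
 * ("sip"), limited to `max_len` bytes when that is non-zero. Returns the
 * amount read (0 means EOF), or -1 with *err set. */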
ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;

  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }

  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total amount of bytes read (may be 0) or -1 on other
 * reader errors.
 * Note that even in case of a -1, chunks may have been read and
 * the buffer queue will have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;

  *err = CURLE_AGAIN;
  while(1) {
    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get fewer bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}