/* bufq.c */
  1. /***************************************************************************
  2. * _ _ ____ _
  3. * Project ___| | | | _ \| |
  4. * / __| | | | |_) | |
  5. * | (__| |_| | _ <| |___
  6. * \___|\___/|_| \_\_____|
  7. *
  8. * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
  9. *
  10. * This software is licensed as described in the file COPYING, which
  11. * you should have received as part of this distribution. The terms
  12. * are also available at https://curl.se/docs/copyright.html.
  13. *
  14. * You may opt to use, copy, modify, merge, publish, distribute and/or sell
  15. * copies of the Software, and permit persons to whom the Software is
  16. * furnished to do so, under the terms of the COPYING file.
  17. *
  18. * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
  19. * KIND, either express or implied.
  20. *
  21. * SPDX-License-Identifier: curl
  22. *
  23. ***************************************************************************/
  24. #include "curl_setup.h"
  25. #include "bufq.h"
  26. /* The last 3 #include files should be in this order */
  27. #include "curl_printf.h"
  28. #include "curl_memory.h"
  29. #include "memdebug.h"
  30. static bool chunk_is_empty(const struct buf_chunk *chunk)
  31. {
  32. return chunk->r_offset >= chunk->w_offset;
  33. }
  34. static bool chunk_is_full(const struct buf_chunk *chunk)
  35. {
  36. return chunk->w_offset >= chunk->dlen;
  37. }
  38. static size_t chunk_len(const struct buf_chunk *chunk)
  39. {
  40. return chunk->w_offset - chunk->r_offset;
  41. }
  42. static size_t chunk_space(const struct buf_chunk *chunk)
  43. {
  44. return chunk->dlen - chunk->w_offset;
  45. }
  46. static void chunk_reset(struct buf_chunk *chunk)
  47. {
  48. chunk->next = NULL;
  49. chunk->r_offset = chunk->w_offset = 0;
  50. }
  51. static size_t chunk_append(struct buf_chunk *chunk,
  52. const unsigned char *buf, size_t len)
  53. {
  54. unsigned char *p = &chunk->x.data[chunk->w_offset];
  55. size_t n = chunk->dlen - chunk->w_offset;
  56. DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  57. if(n) {
  58. n = CURLMIN(n, len);
  59. memcpy(p, buf, n);
  60. chunk->w_offset += n;
  61. }
  62. return n;
  63. }
  64. static size_t chunk_read(struct buf_chunk *chunk,
  65. unsigned char *buf, size_t len)
  66. {
  67. unsigned char *p = &chunk->x.data[chunk->r_offset];
  68. size_t n = chunk->w_offset - chunk->r_offset;
  69. DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  70. if(!n) {
  71. return 0;
  72. }
  73. else if(n <= len) {
  74. memcpy(buf, p, n);
  75. chunk->r_offset = chunk->w_offset = 0;
  76. return n;
  77. }
  78. else {
  79. memcpy(buf, p, len);
  80. chunk->r_offset += len;
  81. return len;
  82. }
  83. }
  84. static size_t chunk_unwrite(struct buf_chunk *chunk, size_t len)
  85. {
  86. size_t n = chunk->w_offset - chunk->r_offset;
  87. DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  88. if(!n) {
  89. return 0;
  90. }
  91. else if(n <= len) {
  92. chunk->r_offset = chunk->w_offset = 0;
  93. return n;
  94. }
  95. else {
  96. chunk->w_offset -= len;
  97. return len;
  98. }
  99. }
  100. static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
  101. Curl_bufq_reader *reader,
  102. void *reader_ctx, CURLcode *err)
  103. {
  104. unsigned char *p = &chunk->x.data[chunk->w_offset];
  105. size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  106. ssize_t nread;
  107. DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  108. if(!n) {
  109. *err = CURLE_AGAIN;
  110. return -1;
  111. }
  112. if(max_len && n > max_len)
  113. n = max_len;
  114. nread = reader(reader_ctx, p, n, err);
  115. if(nread > 0) {
  116. DEBUGASSERT((size_t)nread <= n);
  117. chunk->w_offset += nread;
  118. }
  119. return nread;
  120. }
  121. static void chunk_peek(const struct buf_chunk *chunk,
  122. const unsigned char **pbuf, size_t *plen)
  123. {
  124. DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  125. *pbuf = &chunk->x.data[chunk->r_offset];
  126. *plen = chunk->w_offset - chunk->r_offset;
  127. }
  128. static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
  129. const unsigned char **pbuf, size_t *plen)
  130. {
  131. offset += chunk->r_offset;
  132. DEBUGASSERT(chunk->w_offset >= offset);
  133. *pbuf = &chunk->x.data[offset];
  134. *plen = chunk->w_offset - offset;
  135. }
  136. static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
  137. {
  138. size_t n = chunk->w_offset - chunk->r_offset;
  139. DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  140. if(n) {
  141. n = CURLMIN(n, amount);
  142. chunk->r_offset += n;
  143. if(chunk->r_offset == chunk->w_offset)
  144. chunk->r_offset = chunk->w_offset = 0;
  145. }
  146. return n;
  147. }
  148. static void chunk_list_free(struct buf_chunk **anchor)
  149. {
  150. struct buf_chunk *chunk;
  151. while(*anchor) {
  152. chunk = *anchor;
  153. *anchor = chunk->next;
  154. free(chunk);
  155. }
  156. }
/* Initialize a chunk pool that hands out chunks of `chunk_size`
 * bytes and keeps at most `spare_max` unused chunks for reuse. */
void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}
  166. static CURLcode bufcp_take(struct bufc_pool *pool,
  167. struct buf_chunk **pchunk)
  168. {
  169. struct buf_chunk *chunk = NULL;
  170. if(pool->spare) {
  171. chunk = pool->spare;
  172. pool->spare = chunk->next;
  173. --pool->spare_count;
  174. chunk_reset(chunk);
  175. *pchunk = chunk;
  176. return CURLE_OK;
  177. }
  178. chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  179. if(!chunk) {
  180. *pchunk = NULL;
  181. return CURLE_OUT_OF_MEMORY;
  182. }
  183. chunk->dlen = pool->chunk_size;
  184. *pchunk = chunk;
  185. return CURLE_OK;
  186. }
  187. static void bufcp_put(struct bufc_pool *pool,
  188. struct buf_chunk *chunk)
  189. {
  190. if(pool->spare_count >= pool->spare_max) {
  191. free(chunk);
  192. }
  193. else {
  194. chunk_reset(chunk);
  195. chunk->next = pool->spare;
  196. pool->spare = chunk;
  197. ++pool->spare_count;
  198. }
  199. }
  200. void Curl_bufcp_free(struct bufc_pool *pool)
  201. {
  202. chunk_list_free(&pool->spare);
  203. pool->spare_count = 0;
  204. }
/* Common initializer for a bufq: zero everything, then record the
 * chunk size, the chunk limit, the (optional) pool and the option
 * flags. `pool` may be NULL, in which case chunks are heap-allocated
 * directly. */
static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}
/* Initialize `q` without a pool, using explicit option flags. */
void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}
/* Initialize `q` without a pool and with default options. */
void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}
/* Initialize `q` to take its chunks from `pool`; the chunk size is
 * dictated by the pool. */
void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}
  230. void Curl_bufq_free(struct bufq *q)
  231. {
  232. chunk_list_free(&q->head);
  233. chunk_list_free(&q->spare);
  234. q->tail = NULL;
  235. q->chunk_count = 0;
  236. }
  237. void Curl_bufq_reset(struct bufq *q)
  238. {
  239. struct buf_chunk *chunk;
  240. while(q->head) {
  241. chunk = q->head;
  242. q->head = chunk->next;
  243. chunk->next = q->spare;
  244. q->spare = chunk;
  245. }
  246. q->tail = NULL;
  247. }
  248. size_t Curl_bufq_len(const struct bufq *q)
  249. {
  250. const struct buf_chunk *chunk = q->head;
  251. size_t len = 0;
  252. while(chunk) {
  253. len += chunk_len(chunk);
  254. chunk = chunk->next;
  255. }
  256. return len;
  257. }
  258. size_t Curl_bufq_space(const struct bufq *q)
  259. {
  260. size_t space = 0;
  261. if(q->tail)
  262. space += chunk_space(q->tail);
  263. if(q->spare) {
  264. struct buf_chunk *chunk = q->spare;
  265. while(chunk) {
  266. space += chunk->dlen;
  267. chunk = chunk->next;
  268. }
  269. }
  270. if(q->chunk_count < q->max_chunks) {
  271. space += (q->max_chunks - q->chunk_count) * q->chunk_size;
  272. }
  273. return space;
  274. }
  275. bool Curl_bufq_is_empty(const struct bufq *q)
  276. {
  277. return !q->head || chunk_is_empty(q->head);
  278. }
  279. bool Curl_bufq_is_full(const struct bufq *q)
  280. {
  281. if(!q->tail || q->spare)
  282. return FALSE;
  283. if(q->chunk_count < q->max_chunks)
  284. return FALSE;
  285. if(q->chunk_count > q->max_chunks)
  286. return TRUE;
  287. /* we have no spares and cannot make more, is the tail full? */
  288. return chunk_is_full(q->tail);
  289. }
  290. static struct buf_chunk *get_spare(struct bufq *q)
  291. {
  292. struct buf_chunk *chunk = NULL;
  293. if(q->spare) {
  294. chunk = q->spare;
  295. q->spare = chunk->next;
  296. chunk_reset(chunk);
  297. return chunk;
  298. }
  299. if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
  300. return NULL;
  301. if(q->pool) {
  302. if(bufcp_take(q->pool, &chunk))
  303. return NULL;
  304. ++q->chunk_count;
  305. return chunk;
  306. }
  307. else {
  308. chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
  309. if(!chunk)
  310. return NULL;
  311. chunk->dlen = q->chunk_size;
  312. ++q->chunk_count;
  313. return chunk;
  314. }
  315. }
/* Remove empty chunks from the front of `q`. Each removed chunk goes
 * back to the pool (when one is used), is freed (when over the limit
 * or spares are disabled), or is kept on the spare list for reuse. */
static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk) /* head was also the tail, move tail along */
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
  342. static struct buf_chunk *chunk_prev(struct buf_chunk *head,
  343. struct buf_chunk *chunk)
  344. {
  345. while(head) {
  346. if(head == chunk)
  347. return NULL;
  348. if(head->next == chunk)
  349. return head;
  350. head = head->next;
  351. }
  352. return NULL;
  353. }
/* Remove empty chunks from the back of `q`. Each removed chunk goes
 * back to the pool (when one is used), is freed (when over the limit
 * or spares are disabled), or is kept on the spare list for reuse.
 * Note: finding the new tail walks the list from the head. */
static void prune_tail(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->tail && chunk_is_empty(q->tail)) {
    chunk = q->tail;
    q->tail = chunk_prev(q->head, chunk);
    if(q->tail)
      q->tail->next = NULL;
    if(q->head == chunk) /* removed the only chunk, list now empty */
      q->head = q->tail;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}
  382. static struct buf_chunk *get_non_full_tail(struct bufq *q)
  383. {
  384. struct buf_chunk *chunk;
  385. if(q->tail && !chunk_is_full(q->tail))
  386. return q->tail;
  387. chunk = get_spare(q);
  388. if(chunk) {
  389. /* new tail, and possibly new head */
  390. if(q->tail) {
  391. q->tail->next = chunk;
  392. q->tail = chunk;
  393. }
  394. else {
  395. DEBUGASSERT(!q->head);
  396. q->head = q->tail = chunk;
  397. }
  398. }
  399. return chunk;
  400. }
/* Append up to `len` bytes from `buf` to `q`, growing the queue with
 * new chunks as needed (subject to `max_chunks`/BUFQ_OPT_SOFT_LIMIT).
 * Returns the number of bytes added, or -1 with `*err` set to
 * CURLE_AGAIN (queue full) or CURLE_OUT_OF_MEMORY. */
ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;
  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      /* no tail to write to: when still below the chunk limit (or the
       * limit is soft), the chunk allocation must have failed */
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    /* nothing added although bytes remain: the queue is full */
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
  432. CURLcode Curl_bufq_cwrite(struct bufq *q,
  433. const char *buf, size_t len,
  434. size_t *pnwritten)
  435. {
  436. ssize_t n;
  437. CURLcode result;
  438. n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
  439. *pnwritten = (n < 0) ? 0 : (size_t)n;
  440. return result;
  441. }
  442. CURLcode Curl_bufq_unwrite(struct bufq *q, size_t len)
  443. {
  444. while(len && q->tail) {
  445. len -= chunk_unwrite(q->head, len);
  446. prune_tail(q);
  447. }
  448. return len ? CURLE_AGAIN : CURLE_OK;
  449. }
  450. ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
  451. CURLcode *err)
  452. {
  453. ssize_t nread = 0;
  454. size_t n;
  455. *err = CURLE_OK;
  456. while(len && q->head) {
  457. n = chunk_read(q->head, buf, len);
  458. if(n) {
  459. nread += n;
  460. buf += n;
  461. len -= n;
  462. }
  463. prune_head(q);
  464. }
  465. if(nread == 0) {
  466. *err = CURLE_AGAIN;
  467. return -1;
  468. }
  469. return nread;
  470. }
  471. CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
  472. size_t *pnread)
  473. {
  474. ssize_t n;
  475. CURLcode result;
  476. n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
  477. *pnread = (n < 0) ? 0 : (size_t)n;
  478. return result;
  479. }
  480. bool Curl_bufq_peek(struct bufq *q,
  481. const unsigned char **pbuf, size_t *plen)
  482. {
  483. if(q->head && chunk_is_empty(q->head)) {
  484. prune_head(q);
  485. }
  486. if(q->head && !chunk_is_empty(q->head)) {
  487. chunk_peek(q->head, pbuf, plen);
  488. return TRUE;
  489. }
  490. *pbuf = NULL;
  491. *plen = 0;
  492. return FALSE;
  493. }
  494. bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
  495. const unsigned char **pbuf, size_t *plen)
  496. {
  497. struct buf_chunk *c = q->head;
  498. size_t clen;
  499. while(c) {
  500. clen = chunk_len(c);
  501. if(!clen)
  502. break;
  503. if(offset >= clen) {
  504. offset -= clen;
  505. c = c->next;
  506. continue;
  507. }
  508. chunk_peek_at(c, offset, pbuf, plen);
  509. return TRUE;
  510. }
  511. *pbuf = NULL;
  512. *plen = 0;
  513. return FALSE;
  514. }
  515. void Curl_bufq_skip(struct bufq *q, size_t amount)
  516. {
  517. size_t n;
  518. while(amount && q->head) {
  519. n = chunk_skip(q->head, amount);
  520. amount -= n;
  521. prune_head(q);
  522. }
  523. }
/* Pass the unread bytes of `q` to `writer` until the queue is empty
 * or the writer stops accepting data.
 * Returns the number of bytes the writer consumed, or -1 with `*err`
 * set when the very first write blocked or a real error occurred.
 * A partial pass after some progress returns the progress made. */
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;
  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;
    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    /* only drop from the queue what the writer really accepted */
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}
/* Add `buf` to `q`, flushing `q` through `writer` whenever the queue
 * fills up, so as much of `buf` as possible gets accepted.
 * Returns the number of bytes of `buf` added to the queue, or -1 with
 * `*err` set (CURLE_AGAIN when nothing could be added without
 * blocking, or the writer's real error). */
ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;
  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }
    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0)
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;
    /* Maybe only part of `data` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }
  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}
  599. ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
  600. Curl_bufq_reader *reader, void *reader_ctx,
  601. CURLcode *err)
  602. {
  603. struct buf_chunk *tail = NULL;
  604. ssize_t nread;
  605. *err = CURLE_AGAIN;
  606. tail = get_non_full_tail(q);
  607. if(!tail) {
  608. if(q->chunk_count < q->max_chunks) {
  609. *err = CURLE_OUT_OF_MEMORY;
  610. return -1;
  611. }
  612. /* full, blocked */
  613. *err = CURLE_AGAIN;
  614. return -1;
  615. }
  616. nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  617. if(nread < 0) {
  618. return -1;
  619. }
  620. else if(nread == 0) {
  621. /* eof */
  622. *err = CURLE_OK;
  623. }
  624. return nread;
  625. }
/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total number of bytes read (may be 0) or -1 on other
 * reader errors.
 * Note that even in the case of -1, chunks may have been read and
 * the buffer queue may have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;
  *err = CURLE_AGAIN;
  while(1) {
    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        /* blocked after progress: report the progress made */
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get less bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}
/* Read from `reader` into `q` until blocked, eof or the queue is
 * full; equivalent to bufq_slurpn() with no length limit. */
ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}