bufq.c
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                              \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
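
/* A bufq is a FIFO byte queue stored in a singly linked list of fixed-size
 * chunks (struct buf_chunk). Each chunk keeps a read offset and a write
 * offset into its data array; the bytes between the two offsets are the
 * pending data. Writes append at the `tail` chunk, reads consume from the
 * `head` chunk. Fully read chunks are recycled: returned to a shared
 * struct bufc_pool when one is configured, kept on the queue's own `spare`
 * list, or freed, depending on the BUFQ_OPT_* flags. */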

static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static size_t chunk_space(const struct buf_chunk *chunk)
{
  return chunk->dlen - chunk->w_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}

static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}

static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}

static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
  }
  return n;
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}

void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}

CURLcode Curl_bufcp_take(struct bufc_pool *pool,
                         struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;
  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }
  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

void Curl_bufcp_put(struct bufc_pool *pool,
                    struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}
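
/* Usage sketch: several queues can share one chunk pool via
 * Curl_bufq_initp(), letting fully read chunks be reused across queues.
 * The chunk size (4096), spare maximum (8) and per-queue chunk limit (16)
 * below are arbitrary example values, not defaults:
 *
 *   struct bufc_pool pool;
 *   struct bufq q1, q2;
 *   Curl_bufcp_init(&pool, 4096, 8);
 *   Curl_bufq_initp(&q1, &pool, 16, BUFQ_OPT_NONE);
 *   Curl_bufq_initp(&q2, &pool, 16, BUFQ_OPT_NONE);
 *   ... use q1 and q2, then tear down ...
 *   Curl_bufq_free(&q1);
 *   Curl_bufq_free(&q2);
 *   Curl_bufcp_free(&pool);
 */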

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

size_t Curl_bufq_space(const struct bufq *q)
{
  size_t space = 0;
  if(q->tail)
    space += chunk_space(q->tail);
  if(q->chunk_count < q->max_chunks) {
    space += (q->max_chunks - q->chunk_count) * q->chunk_size;
  }
  return space;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}

static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;
  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }
  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;
  if(q->pool) {
    if(Curl_bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}

static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      Curl_bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;
  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}

ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;
  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if(q->chunk_count < q->max_chunks) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    DEBUGASSERT(n);
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;
  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;
  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;
  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}

ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;
  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;
    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}

ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;
  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block */
      }
    }
    if(len) {
      /* Add whatever is remaining now to bufq */
      n = Curl_bufq_write(q, buf, len, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* no room in bufq, bail out */
        goto out;
      }
      /* Maybe only part of `buf` has been added, continue to loop */
      buf += (size_t)n;
      len -= (size_t)n;
      nwritten += (size_t)n;
    }
  }
out:
  return nwritten;
}

ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;
  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }
  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

ssize_t Curl_bufq_slurpn(struct bufq *q, size_t max_len,
                         Curl_bufq_reader *reader, void *reader_ctx,
                         CURLcode *err)
{
  ssize_t nread = 0, n;
  *err = CURLE_AGAIN;
  while(1) {
    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get fewer bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return Curl_bufq_slurpn(q, 0, reader, reader_ctx, err);
}
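
/* Usage sketch: a standalone queue buffering bytes between a producer and a
 * consumer. The chunk size (1024) and chunk limit (4) are arbitrary example
 * values. Curl_bufq_write() and Curl_bufq_read() return -1 and set *err to
 * CURLE_AGAIN when the queue is full or empty, respectively:
 *
 *   struct bufq q;
 *   CURLcode err;
 *   unsigned char out[128];
 *   ssize_t n;
 *   Curl_bufq_init(&q, 1024, 4);
 *   n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &err);
 *   n = Curl_bufq_read(&q, out, sizeof(out), &err);
 *   Curl_bufq_free(&q);
 */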