
tests/http: more tests with specific clients

- Makefile support for building test-specific clients in tests/http/clients
- auto-make of clients when invoking pytest
- added test_09_02 for server PUSH_PROMISEs using clients/h2-serverpush
- added test_02_21 for lib-based downloads and pausing/unpausing transfers
  (see the pause/resume sketch below)
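
  The pause/resume flow exercised by test_02_21 follows the pattern in the
  new tests/http/clients/h2-download.c (condensed from the diff below, using
  the struct and field names found there): the write callback returns
  CURL_WRITEFUNC_PAUSE once the configured offset is reached, and the multi
  loop later resumes the transfer.

    static size_t my_write_cb(char *buf, size_t nitems, size_t buflen,
                              void *userdata)
    {
      struct transfer *t = userdata;
      if(!t->resumed &&
         t->recv_size < t->pause_at &&
         ((curl_off_t)(t->recv_size + (nitems * buflen)) >= t->pause_at)) {
        t->paused = 1;
        return CURL_WRITEFUNC_PAUSE;  /* stop delivery on this handle */
      }
      /* otherwise write the data and advance t->recv_size */
    }

    /* later, driven from the multi loop when no transfer finished: */
    t->resumed = 1;
    t->paused = 0;
    curl_easy_pause(t->easy, CURLPAUSE_CONT);  /* deliver data again */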

curl url parser:
- added internal method `curl_url_set_authority()` for setting the
  authority part of a URL (used for PUSH_PROMISE, sketched below)
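
  A minimal sketch of how the PUSH_PROMISE handling in lib/http2.c uses the
  new internal method (names exactly as in the diff below; u is the CURLU
  handle of the promised transfer, hp holds the push headers):

    v = curl_pushheader_byname(hp, HTTP_PSEUDO_AUTHORITY);
    if(v) {
      /* parses host, port and (here disallowed) userinfo in one go */
      uc = curl_url_set_authority(u, v, CURLU_DISALLOW_USER);
      if(uc) {
        rc = 2;   /* reject the promised stream */
        goto fail;
      }
    }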

http2:
- made logging of PUSH_PROMISE handling nicer

Placing python test requirements in requirements.txt files
- separate files for the base test suite and the http tests since their
  usage and module lists differ
- use the files in the gh workflows

websocket test cases, fixes for ws and bufq
- bufq: account for spare chunks in the space calculation
- bufq: reset chunks that become empty when skipped
- ws: correctly encode frames with a 126-byte payload (see the framing
  excerpt below)
- ws: update frame meta information on the first call of the collect
  callback that fills the user buffer
- test client ws-data: some test/reporting improvements
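
  The 126-byte fix follows from the WebSocket framing rule: the 7-bit length
  field in the frame header only encodes payloads up to 125 bytes, so a
  payload of exactly 126 bytes already needs the 2-byte extended length
  (excerpt of the corrected branch in lib/ws.c, with WSBIT_MASK as defined
  there for client-masked frames):

    else if(payload_len >= 126) {   /* was '>', mis-encoding exactly 126 */
      head[1] = 126 | WSBIT_MASK;   /* 126 means: 16-bit length follows */
      head[2] = (unsigned char)((payload_len >> 8) & 0xff);
      head[3] = (unsigned char)(payload_len & 0xff);
    }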

Closes #11006
Stefan Eissing
commit acd82c8bfd
+ 1 - 1
.github/workflows/linux.yml

@@ -288,7 +288,7 @@ jobs:
     - if: ${{ contains(matrix.build.install_steps, 'pytest') }}
       run: |
         sudo apt-get install apache2 apache2-dev libnghttp2-dev
-        sudo python3 -m pip install impacket pytest cryptography multipart
+        sudo python3 -m pip install -r tests/http/requirements.txt
         git clone --quiet --depth=1 -b master https://github.com/icing/mod_h2
         cd mod_h2
         autoreconf -fi

+ 4 - 1
.github/workflows/ngtcp2-gnutls.yml

@@ -75,7 +75,6 @@ jobs:
         sudo apt-get update
         sudo apt-get install libtool autoconf automake pkg-config stunnel4 ${{ matrix.build.install }}
         sudo apt-get install apache2 apache2-dev
-        sudo python3 -m pip install impacket pytest cryptography multipart
       name: 'install prereqs and impacket, pytest, crypto'
 
     - run: |
@@ -136,6 +135,10 @@ jobs:
 
     - uses: actions/checkout@v3
 
+    - run: |
+        sudo python3 -m pip install -r tests/requirements.txt -r tests/http/requirements.txt
+      name: 'install python test prereqs'
+
     - run: autoreconf -fi
       name: 'autoreconf'
 

+ 4 - 1
.github/workflows/ngtcp2-quictls.yml

@@ -66,7 +66,6 @@ jobs:
         sudo apt-get update
         sudo apt-get install libtool autoconf automake pkg-config stunnel4 ${{ matrix.build.install }}
         sudo apt-get install apache2 apache2-dev
-        sudo python3 -m pip install impacket pytest cryptography multipart
       name: 'install prereqs and impacket, pytest, crypto'
 
     - run: |
@@ -111,6 +110,10 @@ jobs:
 
     - uses: actions/checkout@v3
 
+    - run: |
+        sudo python3 -m pip install -r tests/requirements.txt -r tests/http/requirements.txt
+      name: 'install python test prereqs'
+
     - run: autoreconf -fi
       name: 'autoreconf'
 

+ 5 - 2
.github/workflows/ngtcp2-wolfssl.yml

@@ -70,8 +70,7 @@ jobs:
         sudo apt-get update
         sudo apt-get install libtool autoconf automake pkg-config stunnel4 ${{ matrix.build.install }}
         sudo apt-get install apache2 apache2-dev
-        sudo python3 -m pip install impacket pytest cryptography multipart
-      name: 'install prereqs and impacket, pytest, crypto'
+      name: 'install prereqs'
 
     - run: |
         git clone --quiet --depth=1 https://github.com/wolfSSL/wolfssl.git
@@ -123,6 +122,10 @@ jobs:
 
     - uses: actions/checkout@v3
 
+    - run: |
+        sudo python3 -m pip install -r tests/requirements.txt -r tests/http/requirements.txt
+      name: 'install python test prereqs'
+
     - run: autoreconf -fi
       name: 'autoreconf'
 

+ 1 - 0
.lift/config.toml

@@ -3,5 +3,6 @@
 # SPDX-License-Identifier: curl
 
 ignoreRules = [ "DEAD_STORE", "subprocess_without_shell_equals_true" ]
+ignoreFiles = [ "tests/http/**" ]
 build = "make"
 setup = ".lift/setup.sh"

+ 1 - 0
configure.ac

@@ -4693,6 +4693,7 @@ AC_CONFIG_FILES([Makefile \
            tests/unit/Makefile \
            tests/http/config.ini \
            tests/http/Makefile \
+           tests/http/clients/Makefile \
            packages/Makefile \
            packages/vms/Makefile \
            curl-config \

+ 8 - 4
docs/examples/http2-serverpush.c

@@ -130,7 +130,7 @@ int my_trace(CURL *handle, curl_infotype type,
 
 #define OUTPUTFILE "dl"
 
-static int setup(CURL *hnd)
+static int setup(CURL *hnd, const char *url)
 {
   FILE *out = fopen(OUTPUTFILE, "wb");
   if(!out)
@@ -141,7 +141,7 @@ static int setup(CURL *hnd)
   curl_easy_setopt(hnd, CURLOPT_WRITEDATA, out);
 
   /* set the same URL */
-  curl_easy_setopt(hnd, CURLOPT_URL, "https://localhost:8443/index.html");
+  curl_easy_setopt(hnd, CURLOPT_URL, url);
 
   /* please be verbose */
   curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
@@ -211,12 +211,16 @@ static int server_push_callback(CURL *parent,
 /*
  * Download a file over HTTP/2, take care of server push.
  */
-int main(void)
+int main(int argc, char *argv[])
 {
   CURL *easy;
   CURLM *multi_handle;
   int transfers = 1; /* we start with one */
   struct CURLMsg *m;
+  const char *url = "https://localhost:8443/index.html";
+
+  if(argc == 2)
+    url = argv[1];
 
   /* init a multi stack */
   multi_handle = curl_multi_init();
@@ -224,7 +228,7 @@ int main(void)
   easy = curl_easy_init();
 
   /* set options */
-  if(setup(easy)) {
+  if(setup(easy, url)) {
     fprintf(stderr, "failed\n");
     return 1;
   }

+ 11 - 0
lib/bufq.c

@@ -138,6 +138,8 @@ static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
   if(n) {
     n = CURLMIN(n, amount);
     chunk->r_offset += n;
+    if(chunk->r_offset == chunk->w_offset)
+      chunk->r_offset = chunk->w_offset = 0;
   }
   return n;
 }
@@ -288,6 +290,13 @@ size_t Curl_bufq_space(const struct bufq *q)
   size_t space = 0;
   if(q->tail)
     space += chunk_space(q->tail);
+  if(q->spare) {
+    struct buf_chunk *chunk = q->spare;
+    while(chunk) {
+      space += chunk->dlen;
+      chunk = chunk->next;
+    }
+  }
   if(q->chunk_count < q->max_chunks) {
     space += (q->max_chunks - q->chunk_count) * q->chunk_size;
   }
@@ -611,6 +620,8 @@ ssize_t Curl_bufq_slurpn(struct bufq *q, size_t max_len,
         /* blocked on first read or real error, fail */
         nread = -1;
       }
+      else
+        *err = CURLE_OK;
       break;
     }
     else if(n == 0) {

+ 34 - 25
lib/http2.c

@@ -38,6 +38,7 @@
 #include "strcase.h"
 #include "multiif.h"
 #include "url.h"
+#include "urlapi-int.h"
 #include "cfilters.h"
 #include "connect.h"
 #include "strtoofft.h"
@@ -768,7 +769,7 @@ static int set_transfer_url(struct Curl_easy *data,
 
   v = curl_pushheader_byname(hp, HTTP_PSEUDO_AUTHORITY);
   if(v) {
-    uc = curl_url_set(u, CURLUPART_HOST, v, 0);
+    uc = curl_url_set_authority(u, v, CURLU_DISALLOW_USER);
     if(uc) {
       rc = 2;
       goto fail;
@@ -950,8 +951,15 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
 
   switch(frame->hd.type) {
   case NGHTTP2_DATA:
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[DATA len=%zu pad=%zu], "
+                  "buffered=%zu, window=%d/%d",
+                  stream_id, frame->hd.length, frame->data.padlen,
+                  Curl_bufq_len(&stream->recvbuf),
+                  nghttp2_session_get_stream_effective_recv_data_length(
+                    ctx->h2, stream->id),
+                  nghttp2_session_get_stream_effective_local_window_size(
+                    ctx->h2, stream->id)));
     /* If !body started on this stream, then receiving DATA is illegal. */
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv frame DATA", stream_id));
     if(!stream->bodystarted) {
       rv = nghttp2_submit_rst_stream(ctx->h2, NGHTTP2_FLAG_NONE,
                                      stream_id, NGHTTP2_PROTOCOL_ERROR);
@@ -965,7 +973,7 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     }
     break;
   case NGHTTP2_HEADERS:
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv frame HEADERS", stream_id));
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[HEADERS]", stream_id));
     if(stream->bodystarted) {
       /* Only valid HEADERS after body started is trailer HEADERS.  We
          buffer them in on_header callback. */
@@ -993,7 +1001,7 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     drain_stream(cf, data, stream);
     break;
   case NGHTTP2_PUSH_PROMISE:
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv PUSH_PROMISE", stream_id));
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[PUSH_PROMISE]", stream_id));
     rv = push_promise(cf, data, &frame->push_promise);
     if(rv) { /* deny! */
       DEBUGASSERT((rv > CURL_PUSH_OK) && (rv <= CURL_PUSH_ERROROUT));
@@ -1010,13 +1018,13 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     }
     break;
   case NGHTTP2_RST_STREAM:
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv RST", stream_id));
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FARME[RST]", stream_id));
     stream->closed = TRUE;
     stream->reset = TRUE;
     drain_stream(cf, data, stream);
     break;
   case NGHTTP2_WINDOW_UPDATE:
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv WINDOW_UPDATE", stream_id));
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[WINDOW_UPDATE]", stream_id));
     if((data->req.keepon & KEEP_SEND_HOLD) &&
        (data->req.keepon & KEEP_SEND)) {
       data->req.keepon &= ~KEEP_SEND_HOLD;
@@ -1026,7 +1034,7 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
     }
     break;
   default:
-    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] recv frame %x",
+    DEBUGF(LOG_CF(data, cf, "[h2sid=%d] FRAME[%x]",
                   stream_id, frame->hd.type));
     break;
   }
@@ -1048,7 +1056,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
     switch(frame->hd.type) {
     case NGHTTP2_SETTINGS: {
       uint32_t max_conn = ctx->max_concurrent_streams;
-      DEBUGF(LOG_CF(data, cf, "recv frame SETTINGS"));
+      DEBUGF(LOG_CF(data, cf, "FRAME[SETTINGS]"));
       ctx->max_concurrent_streams = nghttp2_session_get_remote_settings(
           session, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS);
       ctx->enable_push = nghttp2_session_get_remote_settings(
@@ -1070,7 +1078,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
       ctx->goaway_error = frame->goaway.error_code;
       ctx->last_stream_id = frame->goaway.last_stream_id;
       if(data) {
-        DEBUGF(LOG_CF(data, cf, "recv GOAWAY, error=%d, last_stream=%u",
+        DEBUGF(LOG_CF(data, cf, "FRAME[GOAWAY, error=%d, last_stream=%u]",
                       ctx->goaway_error, ctx->last_stream_id));
         infof(data, "received GOAWAY, error=%d, last_stream=%u",
                     ctx->goaway_error, ctx->last_stream_id);
@@ -1078,7 +1086,7 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
       }
       break;
     case NGHTTP2_WINDOW_UPDATE:
-      DEBUGF(LOG_CF(data, cf, "recv frame WINDOW_UPDATE"));
+      DEBUGF(LOG_CF(data, cf, "FRAME[WINDOW_UPDATE]"));
       break;
     default:
       DEBUGF(LOG_CF(data, cf, "recv frame %x on 0", frame->hd.type));
@@ -1139,10 +1147,6 @@ static int on_data_chunk_recv(nghttp2_session *session, uint8_t flags,
   drain_stream(cf, data_s, stream);
 
   DEBUGASSERT((size_t)nwritten == len);
-  DEBUGF(LOG_CF(data_s, cf, "[h2sid=%d] %zd/%zu DATA recvd, "
-                "(buffer now holds %zu)",
-                stream_id, nwritten, len, Curl_bufq_len(&stream->recvbuf)));
-
   return 0;
 }
 
@@ -1675,11 +1679,13 @@ static ssize_t stream_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
 
   if(nread < 0) {
     if(stream->closed) {
+      DEBUGF(LOG_CF(data, cf, "[h2sid=%d] returning CLOSE", stream->id));
       nread = http2_handle_stream_close(cf, data, stream, err);
     }
     else if(stream->reset ||
             (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
             (ctx->goaway && ctx->last_stream_id < stream->id)) {
+      DEBUGF(LOG_CF(data, cf, "[h2sid=%d] returning ERR", stream->id));
       *err = stream->bodystarted? CURLE_PARTIAL_FILE : CURLE_RECV_ERROR;
       nread = -1;
     }
@@ -1730,7 +1736,8 @@ static CURLcode h2_progress_ingress(struct Curl_cfilter *cf,
                   Curl_bufq_len(&ctx->inbufq), nread, result)); */
     if(nread < 0) {
       if(result != CURLE_AGAIN) {
-        failf(data, "Failed receiving HTTP2 data");
+        failf(data, "Failed receiving HTTP2 data: %d(%s)", result,
+              curl_easy_strerror(result));
         return result;
       }
       break;
@@ -1762,12 +1769,6 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
 
   CF_DATA_SAVE(save, cf, data);
 
-  DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_recv: win %u/%u",
-                stream->id,
-                nghttp2_session_get_local_window_size(ctx->h2),
-                nghttp2_session_get_stream_local_window_size(ctx->h2,
-                                                             stream->id)
-           ));
   nread = stream_recv(cf, data, buf, len, err);
   if(nread < 0 && *err != CURLE_AGAIN)
     goto out;
@@ -1794,8 +1795,6 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
         stream->resp_hds_len = 0;
       }
       if(data_consumed) {
-        DEBUGF(LOG_CF(data, cf, "[h2sid=%d] increase window by %zu",
-                      stream->id, data_consumed));
         nghttp2_session_consume(ctx->h2, stream->id, data_consumed);
       }
     }
@@ -1813,8 +1812,15 @@ out:
     *err = result;
     nread = -1;
   }
-  DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_recv(len=%zu) -> %zd %d",
-                stream->id, len, nread, *err));
+  DEBUGF(LOG_CF(data, cf, "[h2sid=%d] cf_recv(len=%zu) -> %zd %d, "
+                "buffered=%zu, window=%d/%d",
+                stream->id, len, nread, *err,
+                Curl_bufq_len(&stream->recvbuf),
+                nghttp2_session_get_stream_effective_recv_data_length(
+                  ctx->h2, stream->id),
+                nghttp2_session_get_stream_effective_local_window_size(
+                  ctx->h2, stream->id)));
+
   CF_DATA_RESTORE(cf, save);
   return nread;
 }
@@ -2205,6 +2211,9 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf,
       return CURLE_HTTP2;
     }
 
+    if(!pause)
+      drain_stream(cf, data, stream);
+
     /* make sure the window update gets sent */
     result = h2_progress_egress(cf, data);
     if(result)

+ 3 - 0
lib/urlapi-int.h

@@ -28,6 +28,9 @@
 size_t Curl_is_absolute_url(const char *url, char *buf, size_t buflen,
                             bool guess_scheme);
 
+CURLUcode curl_url_set_authority(CURLU *u, const char *authority,
+                                 unsigned int flags);
+
 #ifdef DEBUGBUILD
 CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
                           bool has_scheme);

+ 75 - 41
lib/urlapi.c

@@ -432,6 +432,7 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u,
 
   DEBUGASSERT(login);
 
+  *offset = 0;
   ptr = memchr(login, '@', len);
   if(!ptr)
     goto out;
@@ -462,14 +463,17 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u,
       result = CURLUE_USER_NOT_ALLOWED;
       goto out;
     }
+    free(u->user);
     u->user = userp;
   }
 
   if(passwdp) {
+    free(u->password);
     u->password = passwdp;
   }
 
   if(optionsp) {
+    free(u->options);
     u->options = optionsp;
   }
 
@@ -543,6 +547,7 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
 
     u->portnum = port;
     /* generate a new port number string to get rid of leading zeroes etc */
+    free(u->port);
     u->port = aprintf("%ld", port);
     if(!u->port)
       return CURLUE_OUT_OF_MEMORY;
@@ -763,6 +768,75 @@ static CURLUcode urldecode_host(struct dynbuf *host)
   return CURLUE_OK;
 }
 
+static CURLUcode parse_authority(struct Curl_URL *u,
+                                 const char *auth, size_t authlen,
+                                 unsigned int flags,
+                                 struct dynbuf *host,
+                                 bool has_scheme)
+{
+  size_t offset;
+  CURLUcode result;
+
+  /*
+   * Parse the login details and strip them out of the host name.
+   */
+  result = parse_hostname_login(u, auth, authlen, flags, &offset);
+  if(result)
+    goto out;
+
+  if(Curl_dyn_addn(host, auth + offset, authlen - offset)) {
+    result = CURLUE_OUT_OF_MEMORY;
+    goto out;
+  }
+
+  result = Curl_parse_port(u, host, has_scheme);
+  if(result)
+    goto out;
+
+  switch(ipv4_normalize(host)) {
+  case HOST_IPV4:
+    break;
+  case HOST_IPV6:
+    result = ipv6_parse(u, Curl_dyn_ptr(host), Curl_dyn_len(host));
+    break;
+  case HOST_NAME:
+    result = urldecode_host(host);
+    if(!result)
+      result = hostname_check(u, Curl_dyn_ptr(host), Curl_dyn_len(host));
+    break;
+  case HOST_ERROR:
+    result = CURLUE_OUT_OF_MEMORY;
+    break;
+  case HOST_BAD:
+  default:
+    result = CURLUE_BAD_HOSTNAME; /* Bad IPv4 address even */
+    break;
+  }
+
+out:
+  return result;
+}
+
+CURLUcode curl_url_set_authority(CURLU *u, const char *authority,
+                                 unsigned int flags)
+{
+  CURLUcode result;
+  struct dynbuf host;
+
+  DEBUGASSERT(authority);
+  Curl_dyn_init(&host, CURL_MAX_INPUT_LENGTH);
+
+  result = parse_authority(u, authority, strlen(authority), flags,
+                           &host, !!u->scheme);
+  if(result)
+    Curl_dyn_free(&host);
+  else {
+    free(u->host);
+    u->host = Curl_dyn_ptr(&host);
+  }
+  return result;
+}
+
 /*
  * "Remove Dot Segments"
  * https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4
@@ -1091,48 +1165,8 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
     /* this pathlen also contains the query and the fragment */
     pathlen = urllen - (path - url);
     if(hostlen) {
-      /* number of bytes into the string the host name starts: */
-      size_t offset = 0;
 
-      /*
-       * Parse the login details and strip them out of the host name.
-       */
-      result = parse_hostname_login(u, hostp, hostlen, flags, &offset);
-      if(!result) {
-        hostp += offset;
-        hostlen -= offset;
-        if(Curl_dyn_addn(&host, hostp, hostlen))
-          result = CURLUE_OUT_OF_MEMORY;
-        else
-          result = Curl_parse_port(u, &host, schemelen);
-      }
-      if(!result) {
-        int norm = ipv4_normalize(&host);
-        switch(norm) {
-        case HOST_IPV4:
-          break;
-
-        case HOST_IPV6:
-          result = ipv6_parse(u, Curl_dyn_ptr(&host), Curl_dyn_len(&host));
-          break;
-
-        case HOST_NAME:
-          result = urldecode_host(&host);
-          if(!result)
-            result = hostname_check(u, Curl_dyn_ptr(&host),
-                                    Curl_dyn_len(&host));
-          break;
-
-        case HOST_ERROR:
-          result = CURLUE_OUT_OF_MEMORY;
-          break;
-
-        case HOST_BAD:
-        default:
-          result = CURLUE_BAD_HOSTNAME; /* Bad IPv4 address even */
-          break;
-        }
-      }
+      result = parse_authority(u, hostp, hostlen, flags, &host, schemelen);
       if(result)
         goto fail;
 

+ 16 - 12
lib/ws.c

@@ -455,7 +455,7 @@ static ssize_t ws_enc_write_head(struct Curl_easy *data,
     head[9] = (unsigned char)(payload_len & 0xff);
     hlen = 10;
   }
-  else if(payload_len > 126) {
+  else if(payload_len >= 126) {
     head[1] = 126 | WSBIT_MASK;
     head[2] = (unsigned char)((payload_len >> 8) & 0xff);
     head[3] = (unsigned char)(payload_len & 0xff);
@@ -786,6 +786,14 @@ static ssize_t ws_client_collect(const unsigned char *buf, size_t buflen,
   size_t nwritten;
   curl_off_t remain = (payload_len - (payload_offset + buflen));
 
+  if(!ctx->bufidx) {
+    /* first write */
+    ctx->frame_age = frame_age;
+    ctx->frame_flags = frame_flags;
+    ctx->payload_offset = payload_offset;
+    ctx->payload_len = payload_len;
+  }
+
   if((frame_flags & CURLWS_PING) && !remain) {
     /* auto-respond to PINGs, only works for single-frame payloads atm */
     size_t bytes;
@@ -810,13 +818,6 @@ static ssize_t ws_client_collect(const unsigned char *buf, size_t buflen,
     }
     *err = CURLE_OK;
     memcpy(ctx->buffer, buf, nwritten);
-    if(!ctx->bufidx) {
-      /* first write */
-      ctx->frame_age = frame_age;
-      ctx->frame_flags = frame_flags;
-      ctx->payload_offset = payload_offset;
-      ctx->payload_len = payload_len;
-    }
     ctx->bufidx += nwritten;
   }
   return nwritten;
@@ -831,7 +832,7 @@ static ssize_t nw_in_recv(void *reader_ctx,
 
   *err = curl_easy_recv(data, buf, buflen, &nread);
   if(*err)
-    return *err;
+    return -1;
   return (ssize_t)nread;
 }
 
@@ -888,6 +889,8 @@ CURL_EXTERN CURLcode curl_ws_recv(struct Curl_easy *data, void *buffer,
         infof(data, "connection expectedly closed?");
         return CURLE_GOT_NOTHING;
       }
+      DEBUGF(infof(data, "curl_ws_recv, added %zu bytes from network",
+                   Curl_bufq_len(&ws->recvbuf)));
     }
 
     result = ws_dec_pass(&ws->dec, data, &ws->recvbuf,
@@ -1015,12 +1018,13 @@ CURL_EXTERN CURLcode curl_ws_send(struct Curl_easy *data, const void *buffer,
   if(result)
     return result;
 
-  /* Limit what we are willing to buffer */
+  /* TODO: the current design does not allow partial writes, afaict.
+   * It is not clear how the application is supposed to react. */
   space = Curl_bufq_space(&ws->sendbuf);
+  DEBUGF(infof(data, "curl_ws_send(len=%zu), sendbuf len=%zu space %zu",
+               buflen, Curl_bufq_len(&ws->sendbuf), space));
   if(space < 14)
     return CURLE_AGAIN;
-  if(buflen > space)
-    buflen = space;
 
   if(sendflags & CURLWS_OFFSET) {
     if(totalsize) {

+ 1 - 0
tests/Makefile.am

@@ -120,6 +120,7 @@ checksrc:
 	cd libtest && $(MAKE) checksrc
 	cd unit && $(MAKE) checksrc
 	cd server && $(MAKE) checksrc
+	cd http && $(MAKE) checksrc
 
 if CURLDEBUG
 # for debug builds, we scan the sources on all regular make invokes

+ 10 - 0
tests/http/Makefile.am

@@ -22,6 +22,16 @@
 #
 ###########################################################################
 
+SUBDIRS = clients
+
 clean-local:
 	rm -rf *.pyc __pycache__
 	rm -rf gen
+
+check: clients
+
+clients:
+	@(cd clients; $(MAKE) check)
+
+checksrc:
+	cd clients && $(MAKE) checksrc

+ 8 - 0
tests/http/clients/.gitignore

@@ -0,0 +1,8 @@
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# SPDX-License-Identifier: curl
+
+h2-serverpush
+h2-download
+ws-data
+ws-pingpong

+ 71 - 0
tests/http/clients/Makefile.am

@@ -0,0 +1,71 @@
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+
+AUTOMAKE_OPTIONS = foreign nostdinc
+
+
+# Specify our include paths here, and do it relative to $(top_srcdir) and
+# $(top_builddir), to ensure that these paths which belong to the library
+# being currently built and tested are searched before the library which
+# might possibly already be installed in the system.
+#
+# $(top_srcdir)/include is for libcurl's external include files
+
+AM_CPPFLAGS = -I$(top_srcdir)/include   \
+              -DCURL_DISABLE_DEPRECATION
+
+LIBDIR = $(top_builddir)/lib
+
+# Avoid libcurl obsolete stuff
+AM_CPPFLAGS += -DCURL_NO_OLDIES
+
+if USE_CPPFLAG_CURL_STATICLIB
+AM_CPPFLAGS += -DCURL_STATICLIB
+endif
+
+# Prevent LIBS from being used for all link targets
+LIBS = $(BLANK_AT_MAKETIME)
+
+# Dependencies
+if USE_EXPLICIT_LIB_DEPS
+LDADD = $(LIBDIR)/libcurl.la @LIBCURL_LIBS@
+else
+LDADD = $(LIBDIR)/libcurl.la
+endif
+
+# This might hold -Werror
+CFLAGS += @CURL_CFLAG_EXTRAS@
+
+# Makefile.inc provides the check_PROGRAMS define
+include Makefile.inc
+
+all: $(check_PROGRAMS)
+
+CHECKSRC = $(CS_$(V))
+CS_0 = @echo "  RUN     " $@;
+CS_1 =
+CS_ = $(CS_0)
+
+checksrc:
+	$(CHECKSRC)(@PERL@ $(top_srcdir)/scripts/checksrc.pl -D$(srcdir) $(srcdir)/*.c)

+ 30 - 0
tests/http/clients/Makefile.inc

@@ -0,0 +1,30 @@
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+
+# These are all test clients built and used by the pytest suite
+check_PROGRAMS = \
+  h2-serverpush \
+  h2-download \
+  ws-data \
+  ws-pingpong

+ 275 - 0
tests/http/clients/h2-download.c

@@ -0,0 +1,275 @@
+/***************************************************************************
+ *                                  _   _ ____  _
+ *  Project                     ___| | | |  _ \| |
+ *                             / __| | | | |_) | |
+ *                            | (__| |_| |  _ <| |___
+ *                             \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+/* <DESC>
+ * HTTP/2 downloads with pausing and resuming
+ * </DESC>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* somewhat unix-specific */
+#include <sys/time.h>
+#include <unistd.h>
+
+/* curl stuff */
+#include <curl/curl.h>
+#include <curl/mprintf.h>
+
+#ifndef CURLPIPE_MULTIPLEX
+#error "too old libcurl, cannot do HTTP/2 server push!"
+#endif
+
+static int verbose = 1;
+
+static
+int my_trace(CURL *handle, curl_infotype type,
+             char *data, size_t size,
+             void *userp)
+{
+  const char *text;
+  (void)handle; /* prevent compiler warning */
+  (void)userp;
+
+  switch(type) {
+  case CURLINFO_TEXT:
+    fprintf(stderr, "== Info: %s", data);
+    /* FALLTHROUGH */
+  default: /* in case a new one is introduced to shock us */
+    return 0;
+
+  case CURLINFO_HEADER_OUT:
+    text = "=> Send header";
+    break;
+  case CURLINFO_DATA_OUT:
+    if(verbose <= 1)
+      return 0;
+    text = "=> Send data";
+    break;
+  case CURLINFO_HEADER_IN:
+    text = "<= Recv header";
+    break;
+  case CURLINFO_DATA_IN:
+    if(verbose <= 1)
+      return 0;
+    text = "<= Recv data";
+    break;
+  }
+
+  fprintf(stderr, "%s, %lu bytes (0x%lx)\n",
+          text, (unsigned long)size, (unsigned long)size);
+  return 0;
+}
+
+struct transfer {
+  int idx;
+  CURL *easy;
+  char filename[128];
+  FILE *out;
+  curl_off_t recv_size;
+  curl_off_t pause_at;
+  int paused;
+  int resumed;
+  int done;
+};
+
+static size_t transfer_count;
+static struct transfer *transfers;
+
+static struct transfer *get_transfer_for_easy(CURL *easy)
+{
+  size_t i;
+  for(i = 0; i < transfer_count; ++i) {
+    if(easy == transfers[i].easy)
+      return &transfers[i];
+  }
+  return NULL;
+}
+
+static size_t my_write_cb(char *buf, size_t nitems, size_t buflen,
+                          void *userdata)
+{
+  struct transfer *t = userdata;
+  ssize_t nwritten;
+
+  if(!t->resumed &&
+     t->recv_size < t->pause_at &&
+     ((curl_off_t)(t->recv_size + (nitems * buflen)) >= t->pause_at)) {
+    fprintf(stderr, "transfer %d: PAUSE\n", t->idx);
+    t->paused = 1;
+    return CURL_WRITEFUNC_PAUSE;
+  }
+
+  if(!t->out) {
+    curl_msnprintf(t->filename, sizeof(t->filename)-1, "download_%u.data",
+                   t->idx);
+    t->out = fopen(t->filename, "wb");
+    if(!t->out)
+      return 0;
+  }
+
+  nwritten = fwrite(buf, nitems, buflen, t->out);
+  if(nwritten < 0) {
+    fprintf(stderr, "transfer %d: write failure\n", t->idx);
+    return 0;
+  }
+  t->recv_size += nwritten;
+  return (size_t)nwritten;
+}
+
+static int setup(CURL *hnd, const char *url, struct transfer *t)
+{
+  curl_easy_setopt(hnd, CURLOPT_URL, url);
+  curl_easy_setopt(hnd, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
+  curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYPEER, 0L);
+  curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYHOST, 0L);
+
+  curl_easy_setopt(hnd, CURLOPT_WRITEFUNCTION, my_write_cb);
+  curl_easy_setopt(hnd, CURLOPT_WRITEDATA, t);
+
+  /* please be verbose */
+  if(verbose) {
+    curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
+    curl_easy_setopt(hnd, CURLOPT_DEBUGFUNCTION, my_trace);
+  }
+
+#if (CURLPIPE_MULTIPLEX > 0)
+  /* wait for pipe connection to confirm */
+  curl_easy_setopt(hnd, CURLOPT_PIPEWAIT, 1L);
+#endif
+  return 0; /* all is good */
+}
+
+/*
+ * Download files over HTTP/2, pausing and resuming the transfers.
+ */
+int main(int argc, char *argv[])
+{
+  CURLM *multi_handle;
+  int active_transfers;
+  struct CURLMsg *m;
+  const char *url;
+  size_t i;
+  long pause_offset;
+  struct transfer *t;
+
+  if(argc != 4) {
+    fprintf(stderr, "usage: h2-download count pause-offset url\n");
+    return 2;
+  }
+
+  transfer_count = (size_t)strtol(argv[1], NULL, 10);
+  pause_offset = strtol(argv[2], NULL, 10);
+  url = argv[3];
+
+  transfers = calloc(transfer_count, sizeof(*transfers));
+  if(!transfers) {
+    fprintf(stderr, "error allocating transfer structs\n");
+    return 1;
+  }
+
+  multi_handle = curl_multi_init();
+  curl_multi_setopt(multi_handle, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
+
+  active_transfers = 0;
+  for(i = 0; i < transfer_count; ++i) {
+    t = &transfers[i];
+    t->idx = (int)i;
+    t->pause_at = (curl_off_t)pause_offset * i;
+    t->easy = curl_easy_init();
+    if(!t->easy || setup(t->easy, url, t)) {
+      fprintf(stderr, "setup of transfer #%d failed\n", (int)i);
+      return 1;
+    }
+    curl_multi_add_handle(multi_handle, t->easy);
+    ++active_transfers;
+  }
+
+  do {
+    int still_running; /* keep number of running handles */
+    CURLMcode mc = curl_multi_perform(multi_handle, &still_running);
+
+    if(still_running) {
+      /* wait for activity, timeout or "nothing" */
+      mc = curl_multi_poll(multi_handle, NULL, 0, 1000, NULL);
+      fprintf(stderr, "curl_multi_poll() -> %d\n", mc);
+    }
+
+    if(mc)
+      break;
+
+    /*
+     * Check which transfers have finished and remove their easy handles
+     * from the multi handle; if nothing finished, resume one paused
+     * transfer instead.
+     */
+    do {
+      int msgq = 0;
+      m = curl_multi_info_read(multi_handle, &msgq);
+      if(m && (m->msg == CURLMSG_DONE)) {
+        CURL *e = m->easy_handle;
+        active_transfers--;
+        curl_multi_remove_handle(multi_handle, e);
+        t = get_transfer_for_easy(e);
+        if(t) {
+          t->done = 1;
+        }
+        else
+          curl_easy_cleanup(e);
+      }
+      else {
+        /* nothing happening, resume one paused transfer if there is one */
+        for(i = 0; i < transfer_count; ++i) {
+          t = &transfers[i];
+          if(!t->done && t->paused) {
+            t->resumed = 1;
+            t->paused = 0;
+            curl_easy_pause(t->easy, CURLPAUSE_CONT);
+            fprintf(stderr, "transfer %d: RESUME\n", t->idx);
+            break;
+          }
+        }
+
+      }
+    } while(m);
+
+  } while(active_transfers); /* as long as we have transfers going */
+
+  for(i = 0; i < transfer_count; ++i) {
+    t = &transfers[i];
+    if(t->out) {
+      fclose(t->out);
+      t->out = NULL;
+    }
+    if(t->easy) {
+      curl_easy_cleanup(t->easy);
+      t->easy = NULL;
+    }
+  }
+  free(transfers);
+
+  curl_multi_cleanup(multi_handle);
+
+  return 0;
+}

+ 271 - 0
tests/http/clients/h2-serverpush.c

@@ -0,0 +1,271 @@
+/***************************************************************************
+ *                                  _   _ ____  _
+ *  Project                     ___| | | |  _ \| |
+ *                             / __| | | | |_) | |
+ *                            | (__| |_| |  _ <| |___
+ *                             \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+/* <DESC>
+ * HTTP/2 server push
+ * </DESC>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* somewhat unix-specific */
+#include <sys/time.h>
+#include <unistd.h>
+
+/* curl stuff */
+#include <curl/curl.h>
+#include <curl/mprintf.h>
+
+#ifndef CURLPIPE_MULTIPLEX
+#error "too old libcurl, cannot do HTTP/2 server push!"
+#endif
+
+static
+void dump(const char *text, unsigned char *ptr, size_t size,
+          char nohex)
+{
+  size_t i;
+  size_t c;
+
+  unsigned int width = 0x10;
+
+  if(nohex)
+    /* without the hex output, we can fit more on screen */
+    width = 0x40;
+
+  fprintf(stderr, "%s, %lu bytes (0x%lx)\n",
+          text, (unsigned long)size, (unsigned long)size);
+
+  for(i = 0; i<size; i += width) {
+
+    fprintf(stderr, "%4.4lx: ", (unsigned long)i);
+
+    if(!nohex) {
+      /* hex not disabled, show it */
+      for(c = 0; c < width; c++)
+        if(i + c < size)
+          fprintf(stderr, "%02x ", ptr[i + c]);
+        else
+          fputs("   ", stderr);
+    }
+
+    for(c = 0; (c < width) && (i + c < size); c++) {
+      /* check for 0D0A; if found, skip past and start a new line of output */
+      if(nohex && (i + c + 1 < size) && ptr[i + c] == 0x0D &&
+         ptr[i + c + 1] == 0x0A) {
+        i += (c + 2 - width);
+        break;
+      }
+      fprintf(stderr, "%c",
+              (ptr[i + c] >= 0x20) && (ptr[i + c]<0x80)?ptr[i + c]:'.');
+      /* check again for 0D0A, to avoid an extra \n if it's at width */
+      if(nohex && (i + c + 2 < size) && ptr[i + c + 1] == 0x0D &&
+         ptr[i + c + 2] == 0x0A) {
+        i += (c + 3 - width);
+        break;
+      }
+    }
+    fputc('\n', stderr); /* newline */
+  }
+}
+
+static
+int my_trace(CURL *handle, curl_infotype type,
+             char *data, size_t size,
+             void *userp)
+{
+  const char *text;
+  (void)handle; /* prevent compiler warning */
+  (void)userp;
+  switch(type) {
+  case CURLINFO_TEXT:
+    fprintf(stderr, "== Info: %s", data);
+    /* FALLTHROUGH */
+  default: /* in case a new one is introduced to shock us */
+    return 0;
+
+  case CURLINFO_HEADER_OUT:
+    text = "=> Send header";
+    break;
+  case CURLINFO_DATA_OUT:
+    text = "=> Send data";
+    break;
+  case CURLINFO_SSL_DATA_OUT:
+    text = "=> Send SSL data";
+    break;
+  case CURLINFO_HEADER_IN:
+    text = "<= Recv header";
+    break;
+  case CURLINFO_DATA_IN:
+    text = "<= Recv data";
+    break;
+  case CURLINFO_SSL_DATA_IN:
+    text = "<= Recv SSL data";
+    break;
+  }
+
+  dump(text, (unsigned char *)data, size, 1);
+  return 0;
+}
+
+#define OUTPUTFILE "download_0.data"
+
+static int setup(CURL *hnd, const char *url)
+{
+  FILE *out = fopen(OUTPUTFILE, "wb");
+  if(!out)
+    /* failed */
+    return 1;
+
+  curl_easy_setopt(hnd, CURLOPT_URL, url);
+  curl_easy_setopt(hnd, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
+  curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYPEER, 0L);
+  curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYHOST, 0L);
+
+  curl_easy_setopt(hnd, CURLOPT_WRITEDATA, out);
+
+  /* please be verbose */
+  curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
+  curl_easy_setopt(hnd, CURLOPT_DEBUGFUNCTION, my_trace);
+
+#if (CURLPIPE_MULTIPLEX > 0)
+  /* wait for pipe connection to confirm */
+  curl_easy_setopt(hnd, CURLOPT_PIPEWAIT, 1L);
+#endif
+  return 0; /* all is good */
+}
+
+/* called when there's an incoming push */
+static int server_push_callback(CURL *parent,
+                                CURL *easy,
+                                size_t num_headers,
+                                struct curl_pushheaders *headers,
+                                void *userp)
+{
+  char *headp;
+  size_t i;
+  int *transfers = (int *)userp;
+  char filename[128];
+  FILE *out;
+  static unsigned int count = 0;
+  int rv;
+
+  (void)parent; /* we have no use for this */
+  curl_msnprintf(filename, sizeof(filename)-1, "push%u", count++);
+
+  /* here's a new stream, save it in a new file for each new push */
+  out = fopen(filename, "wb");
+  if(!out) {
+    /* if we cannot save it, deny it */
+    fprintf(stderr, "Failed to create output file for push\n");
+    rv = CURL_PUSH_DENY;
+    goto out;
+  }
+
+  /* write to this file */
+  curl_easy_setopt(easy, CURLOPT_WRITEDATA, out);
+
+  fprintf(stderr, "**** push callback approves stream %u, got %lu headers!\n",
+          count, (unsigned long)num_headers);
+
+  for(i = 0; i<num_headers; i++) {
+    headp = curl_pushheader_bynum(headers, i);
+    fprintf(stderr, "**** header %lu: %s\n", (unsigned long)i, headp);
+  }
+
+  headp = curl_pushheader_byname(headers, ":path");
+  if(headp) {
+    fprintf(stderr, "**** The PATH is %s\n", headp /* skip :path + colon */);
+  }
+
+  (*transfers)++; /* one more */
+  rv = CURL_PUSH_OK;
+
+out:
+  return rv;
+}
+
+
+/*
+ * Download a file over HTTP/2, take care of server push.
+ */
+int main(int argc, char *argv[])
+{
+  CURL *easy;
+  CURLM *multi_handle;
+  int transfers = 1; /* we start with one */
+  struct CURLMsg *m;
+  const char *url;
+
+  if(argc != 2) {
+    fprintf(stderr, "need URL as argument\n");
+    return 2;
+  }
+  url = argv[1];
+
+  multi_handle = curl_multi_init();
+  curl_multi_setopt(multi_handle, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
+  curl_multi_setopt(multi_handle, CURLMOPT_PUSHFUNCTION, server_push_callback);
+  curl_multi_setopt(multi_handle, CURLMOPT_PUSHDATA, &transfers);
+
+  easy = curl_easy_init();
+  if(setup(easy, url)) {
+    fprintf(stderr, "failed\n");
+    return 1;
+  }
+
+  curl_multi_add_handle(multi_handle, easy);
+  do {
+    int still_running; /* keep number of running handles */
+    CURLMcode mc = curl_multi_perform(multi_handle, &still_running);
+
+    if(still_running)
+      /* wait for activity, timeout or "nothing" */
+      mc = curl_multi_poll(multi_handle, NULL, 0, 1000, NULL);
+
+    if(mc)
+      break;
+
+    /*
+     * A little caution when doing server push is that libcurl itself has
+     * created and added one or more easy handles but we need to clean them up
+     * when we are done.
+     */
+    do {
+      int msgq = 0;
+      m = curl_multi_info_read(multi_handle, &msgq);
+      if(m && (m->msg == CURLMSG_DONE)) {
+        CURL *e = m->easy_handle;
+        transfers--;
+        curl_multi_remove_handle(multi_handle, e);
+        curl_easy_cleanup(e);
+      }
+    } while(m);
+
+  } while(transfers); /* as long as we have transfers going */
+
+  curl_multi_cleanup(multi_handle);
+
+  return 0;
+}

+ 263 - 0
tests/http/clients/ws-data.c

@@ -0,0 +1,263 @@
+/***************************************************************************
+ *                                  _   _ ____  _
+ *  Project                     ___| | | |  _ \| |
+ *                             / __| | | | |_) | |
+ *                            | (__| |_| |  _ <| |___
+ *                             \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+/* <DESC>
+ * Websockets data echoes
+ * </DESC>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* somewhat unix-specific */
+#include <sys/time.h>
+#include <unistd.h>
+
+
+/* curl stuff */
+#include <curl/curl.h>
+#include "../../../lib/curl_setup.h"
+
+#ifdef USE_WEBSOCKETS
+
+static
+void dump(const char *text, unsigned char *ptr, size_t size,
+          char nohex)
+{
+  size_t i;
+  size_t c;
+
+  unsigned int width = 0x10;
+
+  if(nohex)
+    /* without the hex output, we can fit more on screen */
+    width = 0x40;
+
+  fprintf(stderr, "%s, %lu bytes (0x%lx)\n",
+          text, (unsigned long)size, (unsigned long)size);
+
+  for(i = 0; i<size; i += width) {
+
+    fprintf(stderr, "%4.4lx: ", (unsigned long)i);
+
+    if(!nohex) {
+      /* hex not disabled, show it */
+      for(c = 0; c < width; c++)
+        if(i + c < size)
+          fprintf(stderr, "%02x ", ptr[i + c]);
+        else
+          fputs("   ", stderr);
+    }
+
+    for(c = 0; (c < width) && (i + c < size); c++) {
+      /* check for 0D0A; if found, skip past and start a new line of output */
+      if(nohex && (i + c + 1 < size) && ptr[i + c] == 0x0D &&
+         ptr[i + c + 1] == 0x0A) {
+        i += (c + 2 - width);
+        break;
+      }
+      fprintf(stderr, "%c",
+              (ptr[i + c] >= 0x20) && (ptr[i + c]<0x80)?ptr[i + c]:'.');
+      /* check again for 0D0A, to avoid an extra \n if it's at width */
+      if(nohex && (i + c + 2 < size) && ptr[i + c + 1] == 0x0D &&
+         ptr[i + c + 2] == 0x0A) {
+        i += (c + 3 - width);
+        break;
+      }
+    }
+    fputc('\n', stderr); /* newline */
+  }
+}
+
+static CURLcode send_binary(CURL *curl, char *buf, size_t buflen)
+{
+  size_t nwritten;
+  CURLcode result =
+    curl_ws_send(curl, buf, buflen, &nwritten, 0, CURLWS_BINARY);
+  fprintf(stderr, "ws: send_binary(len=%ld) -> %d, %ld\n",
+          (long)buflen, result, (long)nwritten);
+  return result;
+}
+
+static CURLcode recv_binary(CURL *curl, char *exp_data, size_t exp_len)
+{
+  struct curl_ws_frame *frame;
+  char recvbuf[256];
+  size_t r_offset, nread;
+  CURLcode result;
+
+  fprintf(stderr, "recv_binary: expected payload %ld bytes\n", (long)exp_len);
+  r_offset = 0;
+  while(1) {
+    result = curl_ws_recv(curl, recvbuf, sizeof(recvbuf), &nread, &frame);
+    if(result == CURLE_AGAIN) {
+      fprintf(stderr, "EAGAIN, sleep, try again\n");
+      usleep(100*1000);
+      continue;
+    }
+    fprintf(stderr, "ws: curl_ws_recv(offset=%ld, len=%ld) -> %d, %ld\n",
+            (long)r_offset, (long)sizeof(recvbuf), result, (long)nread);
+    if(result) {
+      return result;
+    }
+    if(!(frame->flags & CURLWS_BINARY)) {
+      fprintf(stderr, "recv_data: wrong frame, got %ld bytes rflags %x\n",
+              (long)nread, frame->flags);
+      return CURLE_RECV_ERROR;
+    }
+    if(frame->offset != (curl_off_t)r_offset) {
+      fprintf(stderr, "recv_data: frame offset, expected %ld, got %ld\n",
+              (long)r_offset, (long)frame->offset);
+      return CURLE_RECV_ERROR;
+    }
+    if(frame->bytesleft != (curl_off_t)(exp_len - r_offset - nread)) {
+      fprintf(stderr, "recv_data: frame bytesleft, expected %ld, got %ld\n",
+              (long)(exp_len - r_offset - nread), (long)frame->bytesleft);
+      return CURLE_RECV_ERROR;
+    }
+    if(r_offset + nread > exp_len) {
+      fprintf(stderr, "recv_data: data length, expected %ld, now at %ld\n",
+              (long)exp_len, (long)(r_offset + nread));
+      return CURLE_RECV_ERROR;
+    }
+    if(memcmp(exp_data + r_offset, recvbuf, nread)) {
+      fprintf(stderr, "recv_data: data differs, offset=%ld, len=%ld\n",
+              (long)r_offset, (long)nread);
+      dump("expected:", (unsigned char *)exp_data + r_offset, nread, 0);
+      dump("received:", (unsigned char *)recvbuf, nread, 0);
+      return CURLE_RECV_ERROR;
+    }
+    r_offset += nread;
+    if(r_offset >= exp_len) {
+      fprintf(stderr, "recv_data: frame complete\n");
+      break;
+    }
+  }
+  return CURLE_OK;
+}
+
+/* just close the connection */
+static void websocket_close(CURL *curl)
+{
+  size_t sent;
+  CURLcode result =
+    curl_ws_send(curl, "", 0, &sent, 0, CURLWS_CLOSE);
+  fprintf(stderr,
+          "ws: curl_ws_send returned %u, sent %u\n", (int)result, (int)sent);
+}
+
+static CURLcode data_echo(CURL *curl, size_t plen_min, size_t plen_max)
+{
+  CURLcode res;
+  size_t len;
+  char *send_buf;
+  size_t i;
+
+  send_buf = calloc(1, plen_max);
+  if(!send_buf)
+    return CURLE_OUT_OF_MEMORY;
+  for(i = 0; i < plen_max; ++i) {
+    send_buf[i] = (char)('0' + ((int)i % 10));
+  }
+
+  for(len = plen_min; len <= plen_max; ++len) {
+    res = send_binary(curl, send_buf, len);
+    if(res)
+      goto out;
+    res = recv_binary(curl, send_buf, len);
+    if(res) {
+      fprintf(stderr, "recv_data(len=%ld) -> %d\n", (long)len, res);
+      goto out;
+    }
+  }
+
+out:
+  if(!res)
+    websocket_close(curl);
+  free(send_buf);
+  return res;
+}
+
+#endif
+
+int main(int argc, char *argv[])
+{
+#ifdef USE_WEBSOCKETS
+  CURL *curl;
+  CURLcode res = CURLE_OK;
+  const char *url;
+  curl_off_t l1, l2;
+  size_t plen_min, plen_max;
+
+
+  if(argc != 4) {
+    fprintf(stderr, "usage: ws-data url minlen maxlen\n");
+    return 2;
+  }
+  url = argv[1];
+  l1 = strtol(argv[2], NULL, 10);
+  if(l1 < 0) {
+    fprintf(stderr, "minlen must be >= 0, got %ld\n", (long)l1);
+    return 2;
+  }
+  l2 = strtol(argv[3], NULL, 10);
+  if(l2 < 0) {
+    fprintf(stderr, "maxlen must be >= 0, got %ld\n", (long)l2);
+    return 2;
+  }
+  plen_min = l1;
+  plen_max = l2;
+  if(plen_max < plen_min) {
+    fprintf(stderr, "maxlen must be >= minlen, got %ld-%ld\n",
+            (long)plen_min, (long)plen_max);
+    return 2;
+  }
+
+  curl_global_init(CURL_GLOBAL_ALL);
+
+  curl = curl_easy_init();
+  if(curl) {
+    curl_easy_setopt(curl, CURLOPT_URL, url);
+
+    /* use the callback style */
+    curl_easy_setopt(curl, CURLOPT_USERAGENT, "ws-data");
+    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
+    curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 2L); /* websocket style */
+    res = curl_easy_perform(curl);
+    fprintf(stderr, "curl_easy_perform() returned %u\n", (int)res);
+    if(res == CURLE_OK)
+      res = data_echo(curl, plen_min, plen_max);
+
+    /* always cleanup */
+    curl_easy_cleanup(curl);
+  }
+  curl_global_cleanup();
+  return (int)res;
+
+#else /* USE_WEBSOCKETS */
+  (void)argc;
+  (void)argv;
+  fprintf(stderr, "websockets not enabled in libcurl\n");
+  return 1;
+#endif /* !USE_WEBSOCKETS */
+}

+ 158 - 0
tests/http/clients/ws-pingpong.c

@@ -0,0 +1,158 @@
+/***************************************************************************
+ *                                  _   _ ____  _
+ *  Project                     ___| | | |  _ \| |
+ *                             / __| | | | |_) | |
+ *                            | (__| |_| |  _ <| |___
+ *                             \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ * SPDX-License-Identifier: curl
+ *
+ ***************************************************************************/
+/* <DESC>
+ * Websockets pingpong
+ * </DESC>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* somewhat unix-specific */
+#include <sys/time.h>
+#include <unistd.h>
+
+
+/* curl stuff */
+#include <curl/curl.h>
+#include "../../../lib/curl_setup.h"
+
+#ifdef USE_WEBSOCKETS
+
+static CURLcode ping(CURL *curl, const char *send_payload)
+{
+  size_t sent;
+  CURLcode result =
+    curl_ws_send(curl, send_payload, strlen(send_payload), &sent, 0,
+                 CURLWS_PING);
+  fprintf(stderr,
+          "ws: curl_ws_send returned %u, sent %u\n", (int)result, (int)sent);
+
+  return result;
+}
+
+static CURLcode recv_pong(CURL *curl, const char *expected_payload)
+{
+  size_t rlen;
+  struct curl_ws_frame *meta;
+  char buffer[256];
+  CURLcode result = curl_ws_recv(curl, buffer, sizeof(buffer), &rlen, &meta);
+  if(result) {
+    fprintf(stderr, "ws: curl_ws_recv returned %u, received %ld\n",
+            (int)result, (long)rlen);
+    return result;
+  }
+
+  if(!(meta->flags & CURLWS_PONG)) {
+    fprintf(stderr, "recv_pong: wrong frame, got %d bytes rflags %x\n",
+            (int)rlen, meta->flags);
+    return CURLE_RECV_ERROR;
+  }
+
+  fprintf(stderr, "ws: got PONG back\n");
+  if(rlen == strlen(expected_payload) &&
+     !memcmp(expected_payload, buffer, rlen)) {
+    fprintf(stderr, "ws: got the same payload back\n");
+    return CURLE_OK;
+  }
+  fprintf(stderr, "ws: did NOT get the same payload back\n");
+  return CURLE_RECV_ERROR;
+}
+
+/* just close the connection */
+static void websocket_close(CURL *curl)
+{
+  size_t sent;
+  CURLcode result =
+    curl_ws_send(curl, "", 0, &sent, 0, CURLWS_CLOSE);
+  fprintf(stderr,
+          "ws: curl_ws_send returned %u, sent %u\n", (int)result, (int)sent);
+}
+
+static CURLcode pingpong(CURL *curl, const char *payload)
+{
+  CURLcode res;
+  int i;
+
+  res = ping(curl, payload);
+  if(res)
+    return res;
+  for(i = 0; i < 10; ++i) {
+    fprintf(stderr, "Receive pong\n");
+    res = recv_pong(curl, payload);
+    if(res == CURLE_AGAIN) {
+      usleep(100*1000);
+      continue;
+    }
+    websocket_close(curl);
+    return res;
+  }
+  websocket_close(curl);
+  return CURLE_RECV_ERROR;
+}
+
+#endif
+
+int main(int argc, char *argv[])
+{
+#ifdef USE_WEBSOCKETS
+  CURL *curl;
+  CURLcode res = CURLE_OK;
+  const char *url, *payload;
+
+  if(argc != 3) {
+    fprintf(stderr, "usage: ws-pingpong url payload\n");
+    return 2;
+  }
+  url = argv[1];
+  payload = argv[2];
+
+  curl_global_init(CURL_GLOBAL_ALL);
+
+  curl = curl_easy_init();
+  if(curl) {
+    curl_easy_setopt(curl, CURLOPT_URL, url);
+
+    /* use the callback style */
+    curl_easy_setopt(curl, CURLOPT_USERAGENT, "ws-pingpong");
+    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
+    curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 2L); /* websocket style */
+    res = curl_easy_perform(curl);
+    fprintf(stderr, "curl_easy_perform() returned %u\n", (int)res);
+    if(res == CURLE_OK)
+      res = pingpong(curl, payload);
+
+    /* always cleanup */
+    curl_easy_cleanup(curl);
+  }
+  curl_global_cleanup();
+  return (int)res;
+
+#else /* USE_WEBSOCKETS */
+  (void)argc;
+  (void)argv;
+  fprintf(stderr, "websockets not enabled in libcurl\n");
+  return 1;
+#endif /* !USE_WEBSOCKETS */
+}

+ 2 - 0
tests/http/conftest.py

@@ -46,6 +46,8 @@ def env(pytestconfig) -> Env:
         pytest.skip(env.incomplete_reason())
 
     env.setup()
+    if not env.make_clients():
+        pytest.exit(1)
     return env
 
 @pytest.fixture(scope="package", autouse=True)

+ 29 - 0
tests/http/requirements.txt

@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+pytest
+cryptography
+multipart
+websockets

+ 29 - 10
tests/http/test_02_download.py

@@ -30,7 +30,7 @@ import logging
 import os
 import pytest
 
-from testenv import Env, CurlClient
+from testenv import Env, CurlClient, LocalClient
 
 
 log = logging.getLogger(__name__)
@@ -47,9 +47,12 @@ class TestDownload:
 
     @pytest.fixture(autouse=True, scope='class')
     def _class_scope(self, env, httpd):
-        env.make_data_file(indir=httpd.docs_dir, fname="data-100k", fsize=100*1024)
-        env.make_data_file(indir=httpd.docs_dir, fname="data-1m", fsize=1024*1024)
-        env.make_data_file(indir=httpd.docs_dir, fname="data-10m", fsize=10*1024*1024)
+        indir = httpd.docs_dir
+        env.make_data_file(indir=indir, fname="data-10k", fsize=10*1024)
+        env.make_data_file(indir=indir, fname="data-100k", fsize=100*1024)
+        env.make_data_file(indir=indir, fname="data-1m", fsize=1024*1024)
+        env.make_data_file(indir=indir, fname="data-10m", fsize=10*1024*1024)
+        env.make_data_file(indir=indir, fname="data-50m", fsize=50*1024*1024)
 
     # download 1 file
     @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@@ -272,8 +275,29 @@ class TestDownload:
         ])
         r.check_response(count=count, http_status=200)
         srcfile = os.path.join(httpd.docs_dir, 'data-1m')
+        self.check_downloads(curl, srcfile, count)
+        # restore httpd defaults
+        httpd.set_extra_config(env.domain1, lines=None)
+        assert httpd.stop()
+        assert httpd.start()
+
+    # download via lib client, pause/resume at different offsets
+    @pytest.mark.parametrize("pause_offset", [0, 10*1024, 100*1023, 640000])
+    def test_02_21_h2_lib_download(self, env: Env, httpd, nghttpx, pause_offset, repeat):
+        count = 10
+        docname = 'data-10m'
+        url = f'https://localhost:{env.https_port}/{docname}'
+        client = LocalClient(name='h2-download', env=env)
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        r = client.run(args=[str(count), str(pause_offset), url])
+        r.check_exit_code(0)
+        srcfile = os.path.join(httpd.docs_dir, docname)
+        self.check_downloads(client, srcfile, count)
+
+    def check_downloads(self, client, srcfile: str, count: int):
         for i in range(count):
-            dfile = curl.download_file(i)
+            dfile = client.download_file(i)
             assert os.path.exists(dfile)
             if not filecmp.cmp(srcfile, dfile, shallow=False):
                 diff = "".join(difflib.unified_diff(a=open(srcfile).readlines(),
@@ -282,8 +306,3 @@ class TestDownload:
                                                     tofile=dfile,
                                                     n=1))
                 assert False, f'download {dfile} differs:\n{diff}'
-        # restore httpd defaults
-        httpd.set_extra_config(env.domain1, lines=None)
-        assert httpd.stop()
-        assert httpd.start()
-
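The new pause test drives the h2-download client with three positional arguments. A hypothetical direct invocation outside pytest, assuming the client has been built in tests/http/clients and a server is listening (the port and document name here are placeholder assumptions, only the argument order is taken from test_02_21):

import subprocess

# argument order as used by test_02_21 above: <count> <pause_offset> <url>
subprocess.run(['tests/http/clients/h2-download', '10', '10240',
                'https://localhost:5001/data-10m'], check=True)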

+ 16 - 5
tests/http/test_09_push.py

@@ -28,7 +28,7 @@ import logging
 import os
 import pytest
 
-from testenv import Env, CurlClient
+from testenv import Env, CurlClient, LocalClient
 
 
 log = logging.getLogger(__name__)
@@ -41,9 +41,9 @@ class TestPush:
         push_dir = os.path.join(httpd.docs_dir, 'push')
         if not os.path.exists(push_dir):
             os.makedirs(push_dir)
-        env.make_data_file(indir=push_dir, fname="data1", fsize=100*1024)
-        env.make_data_file(indir=push_dir, fname="data2", fsize=100*1024)
-        env.make_data_file(indir=push_dir, fname="data3", fsize=100*1024)
+        env.make_data_file(indir=push_dir, fname="data1", fsize=1*1024)
+        env.make_data_file(indir=push_dir, fname="data2", fsize=1*1024)
+        env.make_data_file(indir=push_dir, fname="data3", fsize=1*1024)
         httpd.set_extra_config(env.domain1, [
             f'H2EarlyHints on',
             f'<Location /push/data1>',
@@ -61,7 +61,7 @@ class TestPush:
         httpd.reload()
 
     # download a file that triggers a "103 Early Hints" response
-    def test_09_01_early_hints(self, env: Env, httpd, repeat):
+    def test_09_01_h2_early_hints(self, env: Env, httpd, repeat):
         curl = CurlClient(env=env)
         url = f'https://{env.domain1}:{env.https_port}/push/data1'
         r = curl.http_download(urls=[url], alpn_proto='h2', with_stats=False,
@@ -71,3 +71,14 @@ class TestPush:
         assert r.responses[0]['status'] == 103, f'{r.responses}'
         assert 'link' in r.responses[0]['header'], f'{r.responses[0]}'
         assert r.responses[0]['header']['link'] == '</push/data2>; rel=preload', f'{r.responses[0]}'
+
+    def test_09_02_h2_push(self, env: Env, httpd, repeat):
+        # use localhost as we do not have resolve support in local client
+        url = f'https://localhost:{env.https_port}/push/data1'
+        client = LocalClient(name='h2-serverpush', env=env)
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        r = client.run(args=[url])
+        r.check_exit_code(0)
+        assert os.path.exists(client.download_file(0))
+        assert os.path.exists(os.path.join(client.run_dir, 'push0')), r.dump_logs()

+ 131 - 0
tests/http/test_20_websockets.py

@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+import logging
+import os
+import shutil
+import subprocess
+import time
+from datetime import datetime, timedelta
+import pytest
+
+from testenv import Env, CurlClient, LocalClient
+
+
+log = logging.getLogger(__name__)
+
+
+@pytest.mark.skipif(condition=not Env.curl_has_protocol('ws'),
+                    reason='curl lacks ws protocol support')
+class TestWebsockets:
+
+    def check_alive(self, env, timeout=5):
+        curl = CurlClient(env=env)
+        url = f'http://localhost:{env.ws_port}/'
+        end = datetime.now() + timedelta(seconds=timeout)
+        while datetime.now() < end:
+            r = curl.http_download(urls=[url])
+            if r.exit_code == 0:
+                return True
+            time.sleep(.1)
+        return False
+
+    def _mkpath(self, path):
+        if not os.path.exists(path):
+            return os.makedirs(path)
+
+    def _rmrf(self, path):
+        if os.path.exists(path):
+            return shutil.rmtree(path)
+
+    @pytest.fixture(autouse=True, scope='class')
+    def ws_echo(self, env):
+        run_dir = os.path.join(env.gen_dir, 'ws-echo-server')
+        err_file = os.path.join(run_dir, 'stderr')
+        self._rmrf(run_dir)
+        self._mkpath(run_dir)
+
+        with open(err_file, 'w') as cerr:
+            cmd = os.path.join(env.project_dir,
+                               'tests/http/testenv/ws_echo_server.py')
+            args = [cmd, '--port', str(env.ws_port)]
+            p = subprocess.Popen(args=args, cwd=run_dir, stderr=cerr,
+                                 stdout=cerr)
+            assert self.check_alive(env)
+            yield
+            p.terminate()
+
+    def test_20_01_basic(self, env: Env, ws_echo, repeat):
+        curl = CurlClient(env=env)
+        url = f'http://localhost:{env.ws_port}/'
+        r = curl.http_download(urls=[url])
+        r.check_response(http_status=426)
+
+    def test_20_02_pingpong_small(self, env: Env, ws_echo, repeat):
+        payload = 125 * "x"
+        client = LocalClient(env=env, name='ws-pingpong')
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        url = f'ws://localhost:{env.ws_port}/'
+        r = client.run(args=[url, payload])
+        r.check_exit_code(0)
+
+    # the python websocket server does not like 'large' control frames
+    def test_20_03_pingpong_too_large(self, env: Env, ws_echo, repeat):
+        payload = 127 * "x"
+        client = LocalClient(env=env, name='ws-pingpong')
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        url = f'ws://localhost:{env.ws_port}/'
+        r = client.run(args=[url, payload])
+        r.check_exit_code(56)
+
+    # exchange small data frames with the echo server
+    def test_20_04_data_small(self, env: Env, ws_echo, repeat):
+        client = LocalClient(env=env, name='ws-data')
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        url = f'ws://localhost:{env.ws_port}/'
+        r = client.run(args=[url, str(0), str(10)])
+        r.check_exit_code(0)
+
+    # exchange data frames with payloads around the 126 byte boundary
+    def test_20_05_data_med(self, env: Env, ws_echo, repeat):
+        client = LocalClient(env=env, name='ws-data')
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        url = f'ws://localhost:{env.ws_port}/'
+        r = client.run(args=[url, str(120), str(130)])
+        r.check_exit_code(0)
+
+    # exchange data frames with payloads around the 64k boundary
+    def test_20_06_data_large(self, env: Env, ws_echo, repeat):
+        client = LocalClient(env=env, name='ws-data')
+        if not client.exists():
+            pytest.skip(f'example client not built: {client.name}')
+        url = f'ws://localhost:{env.ws_port}/'
+        r = client.run(args=[url, str(65535 - 5), str(65535 + 5)])
+        r.check_exit_code(0)
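The payload sizes in these tests straddle the framing boundaries of RFC 6455: control frames such as PING carry at most 125 payload bytes, data frames with payloads of 126 to 65535 bytes use a 16-bit extended length field, and larger payloads use a 64-bit field. A minimal sketch of that length encoding (mask bit omitted):

import struct

def ws_length_field(payload_len: int) -> bytes:
    # 7-bit length for 0..125, 126 marker + 16-bit length up to 65535,
    # 127 marker + 64-bit length beyond that
    if payload_len <= 125:
        return bytes([payload_len])
    if payload_len <= 0xffff:
        return bytes([126]) + struct.pack('!H', payload_len)
    return bytes([127]) + struct.pack('!Q', payload_len)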

+ 1 - 0
tests/http/testenv/__init__.py

@@ -33,5 +33,6 @@ from .certs import TestCA, Credentials
 from .caddy import Caddy
 from .httpd import Httpd
 from .curl import CurlClient, ExecResult
+from .client import LocalClient
 from .nghttpx import Nghttpx
 from .nghttpx import Nghttpx, NghttpxQuic, NghttpxFwd

+ 105 - 0
tests/http/testenv/client.py

@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+import pytest
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+from datetime import timedelta, datetime
+from typing import List, Optional, Dict, Union
+from urllib.parse import urlparse
+
+from . import ExecResult
+from .env import Env
+
+
+log = logging.getLogger(__name__)
+
+
+class LocalClient:
+
+    def __init__(self, name: str, env: Env, run_dir: Optional[str] = None,
+                 timeout: Optional[float] = None):
+        self.name = name
+        self.path = os.path.join(env.project_dir, f'tests/http/clients/{name}')
+        self.env = env
+        self._timeout = timeout if timeout else env.test_timeout
+        self._curl = os.environ['CURL'] if 'CURL' in os.environ else env.curl
+        self._run_dir = run_dir if run_dir else os.path.join(env.gen_dir, name)
+        self._stdoutfile = f'{self._run_dir}/stdout'
+        self._stderrfile = f'{self._run_dir}/stderr'
+        self._rmrf(self._run_dir)
+        self._mkpath(self._run_dir)
+
+    @property
+    def run_dir(self) -> str:
+        return self._run_dir
+
+    def exists(self) -> bool:
+        return os.path.exists(self.path)
+
+    def download_file(self, i: int) -> str:
+        return os.path.join(self._run_dir, f'download_{i}.data')
+
+    def _rmf(self, path):
+        if os.path.exists(path):
+            return os.remove(path)
+
+    def _rmrf(self, path):
+        if os.path.exists(path):
+            return shutil.rmtree(path)
+
+    def _mkpath(self, path):
+        if not os.path.exists(path):
+            return os.makedirs(path)
+
+    def run(self, args):
+        self._rmf(self._stdoutfile)
+        self._rmf(self._stderrfile)
+        start = datetime.now()
+        exception = None
+        myargs = [self.path]
+        myargs.extend(args)
+        try:
+            with open(self._stdoutfile, 'w') as cout:
+                with open(self._stderrfile, 'w') as cerr:
+                    p = subprocess.run(myargs, stderr=cerr, stdout=cout,
+                                       cwd=self._run_dir, shell=False,
+                                       input=None,
+                                       timeout=self._timeout)
+                    exitcode = p.returncode
+        except subprocess.TimeoutExpired:
+            log.warning(f'Timeout after {self._timeout}s: {args}')
+            exitcode = -1
+            exception = 'TimeoutExpired'
+        coutput = open(self._stdoutfile).readlines()
+        cerrput = open(self._stderrfile).readlines()
+        return ExecResult(args=myargs, exit_code=exitcode, exception=exception,
+                          stdout=coutput, stderr=cerrput,
+                          duration=datetime.now() - start)
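A minimal usage sketch for LocalClient, mirroring how the tests above drive a binary from tests/http/clients (client name, arguments and URL are those used by test_02_21; `env` is the usual pytest fixture):

client = LocalClient(name='h2-download', env=env)
if not client.exists():
    pytest.skip(f'example client not built: {client.name}')
r = client.run(args=['10', '0', f'https://localhost:{env.https_port}/data-10m'])
r.check_exit_code(0)
assert os.path.exists(client.download_file(0))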

+ 22 - 0
tests/http/testenv/env.py

@@ -33,6 +33,8 @@ import sys
 from configparser import ConfigParser, ExtendedInterpolation
 from typing import Optional
 
+import pytest
+
 from .certs import CertificateSpec, TestCA, Credentials
 from .ports import alloc_ports
 
@@ -60,6 +62,7 @@ class EnvConfig:
     def __init__(self):
         self.tests_dir = TESTS_HTTPD_PATH
         self.gen_dir = os.path.join(self.tests_dir, 'gen')
+        self.project_dir = os.path.dirname(os.path.dirname(self.tests_dir))
         self.config = DEF_CONFIG
         # check curl and its features
         self.curl = CURL
@@ -109,6 +112,7 @@ class EnvConfig:
             'h2proxys': socket.SOCK_STREAM,
             'caddy': socket.SOCK_STREAM,
             'caddys': socket.SOCK_STREAM,
+            'ws': socket.SOCK_STREAM,
         })
         self.httpd = self.config['httpd']['httpd']
         self.apachectl = self.config['httpd']['apachectl']
@@ -347,6 +351,10 @@ class Env:
     def gen_dir(self) -> str:
         return self.CONFIG.gen_dir
 
+    @property
+    def project_dir(self) -> str:
+        return self.CONFIG.project_dir
+
     @property
     def ca(self):
         return self._ca
@@ -407,6 +415,10 @@ class Env:
     def caddy_http_port(self) -> int:
         return self.CONFIG.ports['caddy']
 
+    @property
+    def ws_port(self) -> int:
+        return self.CONFIG.ports['ws']
+
     @property
     def curl(self) -> str:
         return self.CONFIG.curl
@@ -448,3 +460,13 @@ class Env:
                 s = f"{i:09d}-{s}\n"
                 fd.write(s[0:remain])
         return fpath
+
+    def make_clients(self):
+        client_dir = os.path.join(self.project_dir, 'tests/http/clients')
+        p = subprocess.run(['make'], capture_output=True, text=True,
+                           cwd=client_dir)
+        if p.returncode != 0:
+            pytest.exit(f"`make` in {client_dir} failed:\n{p.stderr}")
+            return False
+        return True
+

+ 1 - 0
tests/http/testenv/httpd.py

@@ -260,6 +260,7 @@ class Httpd:
             conf.extend([  # https host for domain1, h1 + h2
                 f'<VirtualHost *:{self.env.https_port}>',
                 f'    ServerName {domain1}',
+                f'    ServerAlias localhost',
                 f'    Protocols h2 http/1.1',
                 f'    SSLEngine on',
                 f'    SSLCertificateFile {creds1.cert_file}',

+ 66 - 0
tests/http/testenv/ws_echo_server.py

@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+import argparse
+import asyncio
+import logging
+from asyncio import IncompleteReadError
+
+from websockets import server
+from websockets.exceptions import ConnectionClosedError
+
+
+async def echo(websocket):
+    try:
+        async for message in websocket:
+            await websocket.send(message)
+    except ConnectionClosedError:
+        pass
+
+
+async def run_server(port):
+    async with server.serve(echo, "localhost", port):
+        await asyncio.Future()  # run forever
+
+
+def main():
+    parser = argparse.ArgumentParser(prog='ws_echo_server', description="""
+        Run a websocket echo server.
+        """)
+    parser.add_argument("--port", type=int,
+                        default=9876, help="port to listen on")
+    args = parser.parse_args()
+
+    logging.basicConfig(
+        format="%(asctime)s %(message)s",
+        level=logging.DEBUG,
+    )
+
+    asyncio.run(run_server(args.port))
+
+
+if __name__ == "__main__":
+    main()
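The echo server can be started on its own with `python3 tests/http/testenv/ws_echo_server.py --port 9876`. A minimal sketch of talking to it using the same websockets package the server is built on (the port is assumed to match the one passed above):

import asyncio
from websockets import connect

async def check_echo(port: int = 9876):
    # connect, send one message and expect it echoed back unchanged
    async with connect(f'ws://localhost:{port}/') as ws:
        await ws.send('hello')
        assert await ws.recv() == 'hello'

asyncio.run(check_echo())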

+ 26 - 0
tests/requirements.txt

@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+# SPDX-License-Identifier: curl
+#
+###########################################################################
+#
+impacket