[PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse
Roman Arutyunyan
arut at nginx.com
Wed Nov 10 21:42:18 UTC 2021
On Wed, Nov 10, 2021 at 03:59:39PM +0300, Sergey Kandaurov wrote:
>
> > On 18 Oct 2021, at 15:48, Roman Arutyunyan <arut at nginx.com> wrote:
> >
> > # HG changeset patch
> > # User Roman Arutyunyan <arut at nginx.com>
> > # Date 1634561226 -10800
> > # Mon Oct 18 15:47:06 2021 +0300
> > # Branch quic
> > # Node ID 8ae53c592c719af4f3ba47dbd85f78be27aaf7db
> > # Parent 8739f475583031399879ef0af2eb5af76008449e
> > HTTP/3: allowed QUIC stream connection reuse.
> >
> > A QUIC stream connection is treated as reusable until the first bytes of the
> > request arrive, which is also when the request object is now allocated.
> > A connection closed as a result of draining is reset with the error code
> > H3_REQUEST_REJECTED. Such behavior is allowed by quic-http-34:
> >
> > Once a request stream has been opened, the request MAY be cancelled
> > by either endpoint. Clients cancel requests if the response is no
> > longer of interest; servers cancel requests if they are unable to or
> > choose not to respond.
> >
> > When the server cancels a request without performing any application
> > processing, the request is considered "rejected." The server SHOULD
> > abort its response stream with the error code H3_REQUEST_REJECTED.
> >
> > The client can treat requests rejected by the server as though they had
> > never been sent at all, thereby allowing them to be retried later.
> >
>
> Looks good. See below for minor comments.
> BTW, if we still hit the worker_connections limit, this leads to closing
> the entire QUIC connection, but I doubt we can easily improve this.
When there are not enough worker_connections left for a new QUIC stream, we can
send H3_REQUEST_REJECTED to the client without creating a stream, roughly as
sketched below. We can discuss this later.
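
Something along these lines (a sketch of the idea only, not a patch;
ngx_quic_reject_stream() is a hypothetical helper, the surrounding names only
gesture at ngx_quic_create_client_stream(), and the error code macro is the
one the existing code already defines):

    /* when allocating a connection for a new request stream fails against
     * worker_connections, reject only that stream instead of failing in a
     * way that closes the whole QUIC connection */

    if (sc == NULL) {
        /* hypothetical helper: queue RESET_STREAM/STOP_SENDING carrying
         * H3_REQUEST_REJECTED (0x10b) for this stream id, without an
         * ngx_connection_t of its own */
        (void) ngx_quic_reject_stream(c, id, NGX_HTTP_V3_ERR_REQUEST_REJECTED);

        return NGX_QUIC_STREAM_GONE;
    }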
> > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
> > --- a/src/http/ngx_http_request.c
> > +++ b/src/http/ngx_http_request.c
> > @@ -3743,15 +3743,14 @@ ngx_http_free_request(ngx_http_request_t
> >
> > log->action = "closing request";
> >
> > - if (r->connection->timedout) {
> > + if (r->connection->timedout
> > +#if (NGX_HTTP_QUIC)
> > + && r->connection->quic == NULL
> > +#endif
> > + )
> > + {
> > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
> >
> > -#if (NGX_HTTP_V3)
> > - if (r->connection->quic) {
> > - (void) ngx_quic_reset_stream(r->connection,
> > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR);
> > - } else
> > -#endif
> > if (clcf->reset_timedout_connection) {
> > linger.l_onoff = 1;
> > linger.l_linger = 0;
> > @@ -3763,14 +3762,6 @@ ngx_http_free_request(ngx_http_request_t
> > "setsockopt(SO_LINGER) failed");
> > }
> > }
> > -
> > - } else if (!r->response_sent) {
> > -#if (NGX_HTTP_V3)
> > - if (r->connection->quic) {
> > - (void) ngx_quic_reset_stream(r->connection,
> > - NGX_HTTP_V3_ERR_INTERNAL_ERROR);
> > - }
> > -#endif
> > }
> >
> > /* the various request strings were allocated from r->pool */
> > @@ -3830,6 +3821,12 @@ ngx_http_close_connection(ngx_connection
> >
> > #endif
> >
> > +#if (NGX_HTTP_V3)
> > + if (ngx_http_v3_connection(c)) {
> > + ngx_http_v3_reset_connection(c);
> > + }
> > +#endif
> > +
> > #if (NGX_STAT_STUB)
> > (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
> > #endif
> > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h
> > --- a/src/http/v3/ngx_http_v3.h
> > +++ b/src/http/v3/ngx_http_v3.h
> > @@ -90,6 +90,9 @@
> > #define ngx_http_v3_shutdown_connection(c, code, reason) \
> > ngx_quic_shutdown_connection(c->quic->parent, code, reason)
> >
> > +#define ngx_http_v3_connection(c) \
> > + ((c)->quic ? ngx_http_quic_get_connection(c)->addr_conf->http3 : 0)
> > +
> >
> > typedef struct {
> > size_t max_table_capacity;
> > @@ -138,6 +141,7 @@ struct ngx_http_v3_session_s {
> >
> >
> > void ngx_http_v3_init(ngx_connection_t *c);
> > +void ngx_http_v3_reset_connection(ngx_connection_t *c);
> > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c);
> > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c);
> >
> > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c
> > --- a/src/http/v3/ngx_http_v3_request.c
> > +++ b/src/http/v3/ngx_http_v3_request.c
> > @@ -10,6 +10,7 @@
> > #include <ngx_http.h>
> >
> >
> > +static void ngx_http_v3_wait_request_handler(ngx_event_t *rev);
> > static void ngx_http_v3_cleanup_request(void *data);
> > static void ngx_http_v3_process_request(ngx_event_t *rev);
> > static ngx_int_t ngx_http_v3_process_header(ngx_http_request_t *r,
> > @@ -53,12 +54,8 @@ static const struct {
> > void
> > ngx_http_v3_init(ngx_connection_t *c)
> > {
> > - size_t size;
> > uint64_t n;
> > - ngx_buf_t *b;
> > ngx_event_t *rev;
> > - ngx_pool_cleanup_t *cln;
> > - ngx_http_request_t *r;
> > ngx_http_connection_t *hc;
> > ngx_http_v3_session_t *h3c;
> > ngx_http_core_loc_conf_t *clcf;
> > @@ -96,7 +93,7 @@ ngx_http_v3_init(ngx_connection_t *c)
> > h3c = ngx_http_v3_get_session(c);
> >
> > if (h3c->goaway) {
> > - ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED);
> > + c->close = 1;
> > ngx_http_close_connection(c);
> > return;
> > }
> > @@ -116,21 +113,57 @@ ngx_http_v3_init(ngx_connection_t *c)
> > "reached maximum number of requests");
> > }
> >
> > - cln = ngx_pool_cleanup_add(c->pool, 0);
> > - if (cln == NULL) {
> > + rev = c->read;
> > + rev->handler = ngx_http_v3_wait_request_handler;
> > + c->write->handler = ngx_http_empty_handler;
> > +
> > + if (rev->ready) {
> > + rev->handler(rev);
> > + return;
> > + }
> > +
> > + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
> > +
> > + ngx_add_timer(rev, cscf->client_header_timeout);
> > + ngx_reusable_connection(c, 1);
> > +
> > + if (ngx_handle_read_event(rev, 0) != NGX_OK) {
> > + ngx_http_close_connection(c);
> > + return;
> > + }
> > +}
> > +
> > +
> > +static void
> > +ngx_http_v3_wait_request_handler(ngx_event_t *rev)
> > +{
> > + size_t size;
> > + ssize_t n;
> > + ngx_buf_t *b;
> > + ngx_connection_t *c;
> > + ngx_pool_cleanup_t *cln;
> > + ngx_http_request_t *r;
> > + ngx_http_connection_t *hc;
> > + ngx_http_v3_session_t *h3c;
> > + ngx_http_core_srv_conf_t *cscf;
> > +
> > + c = rev->data;
> > +
> > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 wait request handler");
> > +
> > + if (rev->timedout) {
> > + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out");
> > + c->timedout = 1;
> > ngx_http_close_connection(c);
> > return;
> > }
> >
> > - cln->handler = ngx_http_v3_cleanup_request;
> > - cln->data = c;
> > -
> > - h3c->nrequests++;
> > -
> > - if (h3c->keepalive.timer_set) {
> > - ngx_del_timer(&h3c->keepalive);
> > + if (c->close) {
> > + ngx_http_close_connection(c);
> > + return;
> > }
> >
> > + hc = c->data;
> > cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
> >
> > size = cscf->client_header_buffer_size;
> > @@ -159,8 +192,49 @@ ngx_http_v3_init(ngx_connection_t *c)
> > b->end = b->last + size;
> > }
> >
> > + n = c->recv(c, b->last, size);
> > +
> > + if (n == NGX_AGAIN) {
> > +
> > + if (!rev->timer_set) {
> > + ngx_add_timer(rev, cscf->client_header_timeout);
> > + ngx_reusable_connection(c, 1);
> > + }
> > +
> > + if (ngx_handle_read_event(rev, 0) != NGX_OK) {
> > + ngx_http_close_connection(c);
> > + return;
> > + }
> > +
> > + /*
> > + * We are trying to not hold c->buffer's memory for an idle connection.
> > + */
> > +
> > + if (ngx_pfree(c->pool, b->start) == NGX_OK) {
> > + b->start = NULL;
> > + }
> > +
> > + return;
> > + }
> > +
> > + if (n == NGX_ERROR) {
> > + ngx_http_close_connection(c);
> > + return;
> > + }
> > +
> > + if (n == 0) {
> > + ngx_log_error(NGX_LOG_INFO, c->log, 0,
> > + "client closed connection");
> > + ngx_http_close_connection(c);
> > + return;
> > + }
> > +
> > + b->last += n;
> > +
> > c->log->action = "reading client request";
> >
> > + ngx_reusable_connection(c, 0);
> > +
> > r = ngx_http_create_request(c);
> > if (r == NULL) {
> > ngx_http_close_connection(c);
> > @@ -171,7 +245,7 @@ ngx_http_v3_init(ngx_connection_t *c)
> >
> > r->v3_parse = ngx_pcalloc(r->pool, sizeof(ngx_http_v3_parse_t));
> > if (r->v3_parse == NULL) {
> > - ngx_http_close_connection(c);
> > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
> > return;
> > }
> >
>
> Since we now defer request initialization until the first bytes arrive, that's fine.
>
> > @@ -179,23 +253,59 @@ ngx_http_v3_init(ngx_connection_t *c)
> > * cscf->large_client_header_buffers.num;
> >
> > c->data = r;
> > - c->requests = n + 1;
> > + c->requests = (c->quic->id >> 2) + 1;
> > +
> > + cln = ngx_pool_cleanup_add(r->pool, 0);
> > + if (cln == NULL) {
> > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
> > + return;
> > + }
> > +
> > + cln->handler = ngx_http_v3_cleanup_request;
> > + cln->data = r;
> > +
> > + h3c = ngx_http_v3_get_session(c);
> > + h3c->nrequests++;
> > +
> > + if (h3c->keepalive.timer_set) {
> > + ngx_del_timer(&h3c->keepalive);
> > + }
> >
> > - rev = c->read;
> > rev->handler = ngx_http_v3_process_request;
> > + ngx_http_v3_process_request(rev);
> > +}
> >
> > - ngx_http_v3_process_request(rev);
> > +
> > +void
> > +ngx_http_v3_reset_connection(ngx_connection_t *c)
> > +{
> > + if (c->timedout) {
> > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR);
> > +
> > + } else if (c->close) {
> > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED);
> > +
> > + } else if (c->requests == 0 || c->error) {
> > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR);
> > + }
> > }
>
> The c->requests check looks suspicious;
> is it there to catch a not-yet-initialized request?
If for whatever reason we close the stream connection before creating a request
(thus c->requests == 0), that is a clear signal that we need to reset the stream
with the NGX_HTTP_V3_ERR_INTERNAL_ERROR error code. The exceptions are
timeout and connection reuse, but those cases are handled just before it.
All other cases are errors (failed allocation, recv() failure, etc.).
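
For readability, here is the same dispatch with the reasoning above spelled
out as comments (an annotated restatement of the hunk, not new behavior):

void
ngx_http_v3_reset_connection(ngx_connection_t *c)
{
    if (c->timedout) {
        /* the stream timed out, e.g. while waiting for the client header */
        ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR);

    } else if (c->close) {
        /* the stream was reclaimed for reuse (drain) or refused after
         * GOAWAY before a request was created; the client may retry it */
        ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED);

    } else if (c->requests == 0 || c->error) {
        /* closed before a request object existed (c->requests == 0), or a
         * request existed but no response was sent (c->error is set by
         * ngx_http_v3_cleanup_request): failed allocation, recv() error
         * and similar internal failures */
        ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR);
    }
}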
> > static void
> > ngx_http_v3_cleanup_request(void *data)
> > {
> > - ngx_connection_t *c = data;
> > + ngx_http_request_t *r = data;
> >
> > + ngx_connection_t *c;
> > ngx_http_v3_session_t *h3c;
> > ngx_http_core_loc_conf_t *clcf;
> >
> > + c = r->connection;
> > +
> > + if (!r->response_sent) {
> > + c->error = 1;
> > + }
> > +
> > h3c = ngx_http_v3_get_session(c);
> >
> > if (--h3c->nrequests == 0) {
> > diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c
> > --- a/src/http/v3/ngx_http_v3_streams.c
> > +++ b/src/http/v3/ngx_http_v3_streams.c
> > @@ -49,7 +49,8 @@ ngx_http_v3_init_uni_stream(ngx_connecti
> > ngx_http_v3_finalize_connection(c,
> > NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR,
> > "reached maximum number of uni streams");
> > - ngx_http_close_connection(c);
> > + c->data = NULL;
> > + ngx_http_v3_close_uni_stream(c);
> > return;
> > }
> >
> > @@ -57,7 +58,11 @@ ngx_http_v3_init_uni_stream(ngx_connecti
> >
> > us = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_uni_stream_t));
> > if (us == NULL) {
> > - ngx_http_close_connection(c);
> > + ngx_http_v3_finalize_connection(c,
> > + NGX_HTTP_V3_ERR_INTERNAL_ERROR,
> > + "memory allocation error");
> > + c->data = NULL;
> > + ngx_http_v3_close_uni_stream(c);
> > return;
> > }
> >
> > @@ -79,12 +84,12 @@ ngx_http_v3_close_uni_stream(ngx_connect
> > ngx_http_v3_session_t *h3c;
> > ngx_http_v3_uni_stream_t *us;
> >
> > - us = c->data;
> > - h3c = ngx_http_v3_get_session(c);
> > -
> > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 close stream");
> >
> > - if (us->index >= 0) {
> > + us = c->data;
> > +
> > + if (us && us->index >= 0) {
> > + h3c = ngx_http_v3_get_session(c);
> > h3c->known_streams[us->index] = NULL;
> > }
> >
>
> Is there any difference after switching to ngx_http_v3_close_uni_stream(),
> besides ngx_stat_active no longer being decremented? That itself looks like
> the right change, since ngx_stat_active isn't incremented for uni streams.
ngx_stat_active is indeed a good reason. The other reason is
ngx_http_v3_reset_connection(), which is called when closing an HTTP/3 stream
connection. A uni stream connection is also an HTTP/3 stream connection, but
we don't want to call ngx_http_v3_reset_connection() for it; we only want to
call it for request streams. We could add another condition (roughly as
sketched below), but it's easier not to call common http code for uni streams
at all.
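
For illustration only, that extra condition would have looked something like
this ("c->v3_uni" is a made-up flag standing in for whatever would mark HTTP/3
uni streams; no such flag exists today, which is part of why the patch keeps
uni streams out of ngx_http_close_connection() instead):

#if (NGX_HTTP_V3)
    /* hypothetical alternative inside ngx_http_close_connection():
     * reset request streams only, skip control/encoder/decoder streams */
    if (ngx_http_v3_connection(c) && !c->v3_uni) {
        ngx_http_v3_reset_connection(c);
    }
#endif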
> BTW, we need additional checks to prevent processing new streams
> after ngx_http_v3_finalize_connection().
Makes sense.
> # HG changeset patch
> # User Sergey Kandaurov <pluknet at nginx.com>
> # Date 1636549164 -10800
> # Wed Nov 10 15:59:24 2021 +0300
> # Branch quic
> # Node ID 924ee879f8befa1ec574d41a3979e5c5c5db8639
> # Parent c6386afdd1552105c18ce5c47b4b5cd6f6de8b88
> QUIC: stop processing new client streams on quic connection error.
>
> diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c
> --- a/src/event/quic/ngx_event_quic_streams.c
> +++ b/src/event/quic/ngx_event_quic_streams.c
> @@ -314,7 +314,7 @@ ngx_quic_create_client_stream(ngx_connec
>
> qc = ngx_quic_get_connection(c);
>
> - if (qc->shutdown) {
> + if (qc->shutdown || qc->error) {
Why not qc->closing?
> return NGX_QUIC_STREAM_GONE;
> }
>
> @@ -385,7 +385,7 @@ ngx_quic_create_client_stream(ngx_connec
> return NULL;
> }
>
> - if (qc->shutdown) {
> + if (qc->shutdown || qc->error) {
> return NGX_QUIC_STREAM_GONE;
> }
> }
>
>
> --
> Sergey Kandaurov
>
--
Roman Arutyunyan