From arut at nginx.com Tue Feb 1 13:39:59 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 01 Feb 2022 16:39:59 +0300 Subject: [PATCH] QUIC: stream lingering Message-ID: # HG changeset patch # User Roman Arutyunyan # Date 1643722727 -10800 # Tue Feb 01 16:38:47 2022 +0300 # Branch quic # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 QUIC: stream lingering. Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it can persist after connection is closed by application. During this period, server is expecting stream final size from client for correct flow control. Also, buffered output is sent to client as more flow control credit is granted. diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -303,6 +303,7 @@ ngx_quic_new_connection(ngx_connection_t ctp->active_connection_id_limit = 2; ngx_queue_init(&qc->streams.uninitialized); + ngx_queue_init(&qc->streams.free); qc->streams.recv_max_data = qc->tp.initial_max_data; qc->streams.recv_window = qc->streams.recv_max_data; diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h --- a/src/event/quic/ngx_event_quic.h +++ b/src/event/quic/ngx_event_quic.h @@ -78,11 +78,13 @@ struct ngx_quic_stream_s { uint64_t id; uint64_t acked; uint64_t send_max_data; + uint64_t send_offset; + uint64_t send_final_size; uint64_t recv_max_data; uint64_t recv_offset; uint64_t recv_window; uint64_t recv_last; - uint64_t final_size; + uint64_t recv_final_size; ngx_chain_t *in; ngx_chain_t *out; ngx_uint_t cancelable; /* unsigned cancelable:1; */ diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -114,13 +114,16 @@ struct ngx_quic_socket_s { typedef struct { ngx_rbtree_t tree; 
ngx_rbtree_node_t sentinel; + ngx_queue_t uninitialized; + ngx_queue_t free; uint64_t sent; uint64_t recv_offset; uint64_t recv_window; uint64_t recv_last; uint64_t recv_max_data; + uint64_t send_offset; uint64_t send_max_data; uint64_t server_max_streams_uni; diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c --- a/src/event/quic/ngx_event_quic_frames.c +++ b/src/event/quic/ngx_event_quic_frames.c @@ -391,6 +391,10 @@ ngx_quic_split_frame(ngx_connection_t *c return NGX_ERROR; } + if (f->type == NGX_QUIC_FT_STREAM) { + f->u.stream.fin = 0; + } + ngx_queue_insert_after(&f->queue, &nf->queue); return NGX_OK; diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -13,6 +13,8 @@ #define NGX_QUIC_STREAM_GONE (void *) -1 +static ngx_int_t ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, + ngx_uint_t err); static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); @@ -28,11 +30,13 @@ static ssize_t ngx_quic_stream_send(ngx_ size_t size); static ngx_chain_t *ngx_quic_stream_send_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit); -static size_t ngx_quic_max_stream_flow(ngx_connection_t *c); +static ngx_int_t ngx_quic_stream_flush(ngx_quic_stream_t *qs); +static size_t ngx_quic_max_stream_flow(ngx_quic_stream_t *qs); static void ngx_quic_stream_cleanup_handler(void *data); -static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); -static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); -static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); +static ngx_int_t ngx_quic_close_stream(ngx_quic_stream_t *qs); +static ngx_int_t ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last); 
+static ngx_int_t ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last); +static ngx_int_t ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs); static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); static void ngx_quic_set_event(ngx_event_t *ev); @@ -186,15 +190,20 @@ ngx_quic_close_streams(ngx_connection_t ns = 0; #endif - for (node = ngx_rbtree_min(tree->root, tree->sentinel); - node; - node = ngx_rbtree_next(tree, node)) - { + node = ngx_rbtree_min(tree->root, tree->sentinel); + + while (node) { qs = (ngx_quic_stream_t *) node; + node = ngx_rbtree_next(tree, node); qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; + if (qs->connection == NULL) { + ngx_quic_close_stream(qs); + continue; + } + ngx_quic_set_event(qs->connection->read); ngx_quic_set_event(qs->connection->write); @@ -213,13 +222,17 @@ ngx_quic_close_streams(ngx_connection_t ngx_int_t ngx_quic_reset_stream(ngx_connection_t *c, ngx_uint_t err) { + return ngx_quic_do_reset_stream(c->quic, err); +} + + +static ngx_int_t +ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, ngx_uint_t err) +{ ngx_connection_t *pc; ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; - if (qs->send_state == NGX_QUIC_STREAM_SEND_DATA_RECVD || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_SENT || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_RECVD) @@ -228,10 +241,14 @@ ngx_quic_reset_stream(ngx_connection_t * } qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; + qs->send_final_size = qs->send_offset; pc = qs->parent; qc = ngx_quic_get_connection(pc); + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL reset", qs->id); + frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { return NGX_ERROR; @@ -241,10 +258,13 @@ ngx_quic_reset_stream(ngx_connection_t * frame->type = NGX_QUIC_FT_RESET_STREAM; frame->u.reset_stream.id = qs->id; frame->u.reset_stream.error_code = err; - frame->u.reset_stream.final_size = 
c->sent; + frame->u.reset_stream.final_size = qs->send_offset; ngx_quic_queue_frame(qc, frame); + ngx_quic_free_chain(pc, qs->out); + qs->out = NULL; + return NGX_OK; } @@ -271,10 +291,7 @@ ngx_quic_shutdown_stream(ngx_connection_ static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c) { - ngx_connection_t *pc; - ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; - ngx_quic_connection_t *qc; + ngx_quic_stream_t *qs; qs = c->quic; @@ -284,32 +301,13 @@ ngx_quic_shutdown_stream_send(ngx_connec return NGX_OK; } - qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; - - pc = qs->parent; - qc = ngx_quic_get_connection(pc); + qs->send_state = NGX_QUIC_STREAM_SEND_SEND; + qs->send_final_size = c->sent; - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - return NGX_ERROR; - } - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, "quic stream id:0x%xL send shutdown", qs->id); - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 1; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = 0; - - ngx_quic_queue_frame(qc, frame); - - return NGX_OK; + return ngx_quic_stream_flush(qs); } @@ -341,7 +339,7 @@ ngx_quic_shutdown_stream_recv(ngx_connec return NGX_ERROR; } - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, "quic stream id:0x%xL recv shutdown", qs->id); frame->level = ssl_encryption_application; @@ -591,6 +589,7 @@ ngx_quic_create_stream(ngx_connection_t { ngx_log_t *log; ngx_pool_t *pool; + ngx_queue_t *q; ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_pool_cleanup_t *cln; @@ -601,25 +600,41 @@ ngx_quic_create_stream(ngx_connection_t qc = ngx_quic_get_connection(c); - pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); - if (pool == NULL) { - return NULL; + if (!ngx_queue_empty(&qc->streams.free)) 
{ + q = ngx_queue_head(&qc->streams.free); + qs = ngx_queue_data(q, ngx_quic_stream_t, queue); + ngx_queue_remove(&qs->queue); + + } else { + /* + * the number of streams is limited by transport + * parameters and application requirements + */ + + qs = ngx_palloc(c->pool, sizeof(ngx_quic_stream_t)); + if (qs == NULL) { + return NULL; + } } - qs = ngx_pcalloc(pool, sizeof(ngx_quic_stream_t)); - if (qs == NULL) { - ngx_destroy_pool(pool); - return NULL; - } + ngx_memzero(qs, sizeof(ngx_quic_stream_t)); qs->node.key = id; qs->parent = c; qs->id = id; - qs->final_size = (uint64_t) -1; + qs->send_final_size = (uint64_t) -1; + qs->recv_final_size = (uint64_t) -1; + + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); + if (pool == NULL) { + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); + return NULL; + } log = ngx_palloc(pool, sizeof(ngx_log_t)); if (log == NULL) { ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -629,6 +644,7 @@ ngx_quic_create_stream(ngx_connection_t sc = ngx_get_connection(c->fd, log); if (sc == NULL) { ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -697,6 +713,7 @@ ngx_quic_create_stream(ngx_connection_t if (cln == NULL) { ngx_close_connection(sc); ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -737,7 +754,7 @@ ngx_quic_stream_recv(ngx_connection_t *c return NGX_ERROR; } - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, "quic stream id:0x%xL recv buf:%uz", qs->id, size); if (size == 0) { @@ -763,7 +780,7 @@ ngx_quic_stream_recv(ngx_connection_t *c rev->ready = 0; if (qs->recv_state == NGX_QUIC_STREAM_RECV_SIZE_KNOWN - && qs->recv_offset == qs->final_size) + && qs->recv_offset == qs->recv_final_size) { qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_READ; } @@ -781,7 +798,7 @@ ngx_quic_stream_recv(ngx_connection_t *c 
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic stream id:0x%xL recv len:%z", qs->id, len); - if (ngx_quic_update_flow(c, qs->recv_offset + len) != NGX_OK) { + if (ngx_quic_update_flow(qs, qs->recv_offset + len) != NGX_OK) { return NGX_ERROR; } @@ -822,9 +839,7 @@ ngx_quic_stream_send_chain(ngx_connectio off_t flow; size_t n; ngx_event_t *wev; - ngx_chain_t *out; ngx_connection_t *pc; - ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -842,7 +857,8 @@ ngx_quic_stream_send_chain(ngx_connectio qs->send_state = NGX_QUIC_STREAM_SEND_SEND; - flow = ngx_quic_max_stream_flow(c); + flow = qs->acked + qc->conf->stream_buffer_size - c->sent; + if (flow == 0) { wev->ready = 0; return in; @@ -852,37 +868,15 @@ ngx_quic_stream_send_chain(ngx_connectio limit = flow; } - in = ngx_quic_write_chain(pc, &qs->out, in, limit, 0, &n); + in = ngx_quic_write_chain(pc, &qs->out, in, limit, + c->sent - qs->send_offset, &n); if (in == NGX_CHAIN_ERROR) { return NGX_CHAIN_ERROR; } - out = ngx_quic_read_chain(pc, &qs->out, n); - if (out == NGX_CHAIN_ERROR) { - return NGX_CHAIN_ERROR; - } - - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - return NGX_CHAIN_ERROR; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->data = out; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 0; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = n; - c->sent += n; qc->streams.sent += n; - ngx_quic_queue_frame(qc, frame); - if (in) { wev->ready = 0; } @@ -890,59 +884,133 @@ ngx_quic_stream_send_chain(ngx_connectio ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic send_chain sent:%uz", n); + if (ngx_quic_stream_flush(qs) != NGX_OK) { + return NGX_CHAIN_ERROR; + } + return in; } +static ngx_int_t +ngx_quic_stream_flush(ngx_quic_stream_t *qs) +{ + size_t limit, len; + ngx_uint_t last; + ngx_chain_t *out, *cl; + ngx_quic_frame_t *frame; + 
ngx_connection_t *pc; + ngx_quic_connection_t *qc; + + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { + return NGX_OK; + } + + pc = qs->parent; + qc = ngx_quic_get_connection(pc); + + limit = ngx_quic_max_stream_flow(qs); + last = 0; + + out = ngx_quic_read_chain(pc, &qs->out, limit); + if (out == NGX_CHAIN_ERROR) { + return NGX_ERROR; + } + + len = 0; + last = 0; + + for (cl = out; cl; cl = cl->next) { + len += cl->buf->last - cl->buf->pos; + } + + if (qs->send_final_size != (uint64_t) -1 + && qs->send_final_size == qs->send_offset + len) + { + qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; + last = 1; + } + + if (len == 0 && !last) { + return NGX_OK; + } + + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; + } + + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STREAM; + frame->data = out; + + frame->u.stream.off = 1; + frame->u.stream.len = 1; + frame->u.stream.fin = last; + + frame->u.stream.stream_id = qs->id; + frame->u.stream.offset = qs->send_offset; + frame->u.stream.length = len; + + ngx_quic_queue_frame(qc, frame); + + qs->send_offset += len; + qc->streams.send_offset += len; + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flush len:%uz last:%ui", + qs->id, len, last); + + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + + return NGX_OK; +} + + static size_t -ngx_quic_max_stream_flow(ngx_connection_t *c) +ngx_quic_max_stream_flow(ngx_quic_stream_t *qs) { size_t size; - uint64_t sent, unacked; - ngx_quic_stream_t *qs; + ngx_connection_t *pc; ngx_quic_connection_t *qc; - qs = c->quic; - qc = ngx_quic_get_connection(qs->parent); - + pc = qs->parent; + qc = ngx_quic_get_connection(pc); size = qc->conf->stream_buffer_size; - sent = c->sent; - unacked = sent - qs->acked; if (qc->streams.send_max_data == 0) { qc->streams.send_max_data = qc->ctp.initial_max_data; } - if 
(unacked >= size) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit buffer size"); - return 0; - } - - size -= unacked; - - if (qc->streams.sent >= qc->streams.send_max_data) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit MAX_DATA"); + if (qc->streams.send_offset == qc->streams.send_max_data) { + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow:0 MAX_DATA", qs->id); return 0; } - if (qc->streams.sent + size > qc->streams.send_max_data) { - size = qc->streams.send_max_data - qc->streams.sent; + if (qc->streams.send_offset + size > qc->streams.send_max_data) { + size = qc->streams.send_max_data - qc->streams.send_offset; } - if (sent >= qs->send_max_data) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit MAX_STREAM_DATA"); + if (qs->send_offset == qs->send_max_data) { + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow:0 MAX_STREAM_DATA", qs->id); return 0; } - if (sent + size > qs->send_max_data) { - size = qs->send_max_data - sent; + if (qs->send_offset + size > qs->send_max_data) { + size = qs->send_max_data - qs->send_offset; } - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow:%uz", size); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL send flow:%uz", qs->id, size); return size; } @@ -953,40 +1021,65 @@ ngx_quic_stream_cleanup_handler(void *da { ngx_connection_t *c = data; + ngx_quic_stream_t *qs; + + qs = c->quic; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, + "quic stream id:0x%xL cleanup", qs->id); + + if (ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN) != NGX_OK) { + ngx_quic_close_connection(c, NGX_ERROR); + return; + } + + qs->connection = NULL; + + if (ngx_quic_close_stream(qs) != NGX_OK) { + ngx_quic_close_connection(c, NGX_ERROR); + return; + } +} + + +static ngx_int_t +ngx_quic_close_stream(ngx_quic_stream_t *qs) +{ ngx_connection_t *pc; ngx_quic_frame_t *frame; - 
ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL cleanup", qs->id); + if (!qc->closing) { + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV + || qs->send_state == NGX_QUIC_STREAM_SEND_READY + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) + { + return NGX_OK; + } + } + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL close", qs->id); + + ngx_quic_free_chain(pc, qs->in); + ngx_quic_free_chain(pc, qs->out); ngx_rbtree_delete(&qc->streams.tree, &qs->node); - ngx_quic_free_chain(pc, qs->in); - ngx_quic_free_chain(pc, qs->out); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); if (qc->closing) { /* schedule handler call to continue ngx_quic_close_connection() */ ngx_post_event(pc->read, &ngx_posted_events); - return; + return NGX_OK; } - if (qc->error) { - goto done; - } - - (void) ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN); - - (void) ngx_quic_update_flow(c, qs->recv_last); - if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { - goto done; + return NGX_ERROR; } frame->level = ssl_encryption_application; @@ -1004,13 +1097,11 @@ ngx_quic_stream_cleanup_handler(void *da ngx_quic_queue_frame(qc, frame); } -done: - - (void) ngx_quic_output(pc); - if (qc->shutdown) { ngx_post_event(pc->read, &ngx_posted_events); } + + return NGX_OK; } @@ -1019,7 +1110,6 @@ ngx_quic_handle_stream_frame(ngx_connect ngx_quic_frame_t *frame) { uint64_t last; - ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; ngx_quic_stream_frame_t *f; @@ -1047,19 +1137,17 @@ ngx_quic_handle_stream_frame(ngx_connect return NGX_OK; } - sc = qs->connection; - if (qs->recv_state != NGX_QUIC_STREAM_RECV_RECV && qs->recv_state != NGX_QUIC_STREAM_RECV_SIZE_KNOWN) { return NGX_OK; } - if (ngx_quic_control_flow(sc, last) != NGX_OK) { + if (ngx_quic_control_flow(qs, 
last) != NGX_OK) { return NGX_ERROR; } - if (qs->final_size != (uint64_t) -1 && last > qs->final_size) { + if (qs->recv_final_size != (uint64_t) -1 && last > qs->recv_final_size) { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1074,7 +1162,8 @@ ngx_quic_handle_stream_frame(ngx_connect } if (f->fin) { - if (qs->final_size != (uint64_t) -1 && qs->final_size != last) { + if (qs->recv_final_size != (uint64_t) -1 && qs->recv_final_size != last) + { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1084,7 +1173,7 @@ ngx_quic_handle_stream_frame(ngx_connect return NGX_ERROR; } - qs->final_size = last; + qs->recv_final_size = last; qs->recv_state = NGX_QUIC_STREAM_RECV_SIZE_KNOWN; } @@ -1095,8 +1184,12 @@ ngx_quic_handle_stream_frame(ngx_connect return NGX_ERROR; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + if (f->offset == qs->recv_offset) { - ngx_quic_set_event(sc->read); + ngx_quic_set_event(qs->connection->read); } return NGX_OK; @@ -1119,20 +1212,26 @@ ngx_quic_handle_max_data_frame(ngx_conne return NGX_OK; } - if (tree->root != tree->sentinel - && qc->streams.sent >= qc->streams.send_max_data) + if (tree->root == tree->sentinel + || qc->streams.send_offset < qc->streams.send_max_data) { - - for (node = ngx_rbtree_min(tree->root, tree->sentinel); - node; - node = ngx_rbtree_next(tree, node)) - { - qs = (ngx_quic_stream_t *) node; - ngx_quic_set_event(qs->connection->write); - } + /* not blocked on MAX_DATA */ + qc->streams.send_max_data = f->max_data; + return NGX_OK; } qc->streams.send_max_data = f->max_data; + node = ngx_rbtree_min(tree->root, tree->sentinel); + + while (node && qc->streams.send_offset < qc->streams.send_max_data) { + + qs = (ngx_quic_stream_t *) node; + node = ngx_rbtree_next(tree, node); + + if (ngx_quic_stream_flush(qs) != NGX_OK) { + return NGX_ERROR; + } + } return NGX_OK; } @@ -1180,7 +1279,7 @@ ngx_quic_handle_stream_data_blocked_fram return NGX_OK; } - return 
ngx_quic_update_max_stream_data(qs->connection); + return ngx_quic_update_max_stream_data(qs); } @@ -1188,7 +1287,6 @@ ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_max_stream_data_frame_t *f) { - uint64_t sent; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1215,15 +1313,15 @@ ngx_quic_handle_max_stream_data_frame(ng return NGX_OK; } - sent = qs->connection->sent; - - if (sent >= qs->send_max_data) { - ngx_quic_set_event(qs->connection->write); + if (qs->send_offset < qs->send_max_data) { + /* not blocked on MAX_STREAM_DATA */ + qs->send_max_data = f->limit; + return NGX_OK; } qs->send_max_data = f->limit; - return NGX_OK; + return ngx_quic_stream_flush(qs); } @@ -1231,7 +1329,6 @@ ngx_int_t ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) { - ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1262,13 +1359,13 @@ ngx_quic_handle_reset_stream_frame(ngx_c qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; - sc = qs->connection; - - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { + if (ngx_quic_control_flow(qs, f->final_size) != NGX_OK) { return NGX_ERROR; } - if (qs->final_size != (uint64_t) -1 && qs->final_size != f->final_size) { + if (qs->recv_final_size != (uint64_t) -1 + && qs->recv_final_size != f->final_size) + { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1278,12 +1375,16 @@ ngx_quic_handle_reset_stream_frame(ngx_c return NGX_ERROR; } - qs->final_size = f->final_size; + qs->recv_final_size = f->final_size; - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { + if (ngx_quic_update_flow(qs, qs->recv_final_size) != NGX_OK) { return NGX_ERROR; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + ngx_quic_set_event(qs->connection->read); return NGX_OK; @@ -1316,10 +1417,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c return NGX_OK; } - if 
(ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { + if (ngx_quic_do_reset_stream(qs, f->error_code) != NGX_OK) { return NGX_ERROR; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + ngx_quic_set_event(qs->connection->write); return NGX_OK; @@ -1369,30 +1474,37 @@ ngx_quic_handle_stream_ack(ngx_connectio return; } + if (qs->connection == NULL) { + qs->acked += f->u.stream.length; + return; + } + sent = qs->connection->sent; unacked = sent - qs->acked; + qs->acked += f->u.stream.length; - if (unacked >= qc->conf->stream_buffer_size) { - ngx_quic_set_event(qs->connection->write); + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL ack len:%uL acked:%uL unacked:%uL", + qs->id, f->u.stream.length, qs->acked, sent - qs->acked); + + if (unacked != qc->conf->stream_buffer_size) { + /* not blocked on buffer size */ + return; } - qs->acked += f->u.stream.length; - - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, - "quic stream ack len:%uL acked:%uL unacked:%uL", - f->u.stream.length, qs->acked, sent - qs->acked); + ngx_quic_set_event(qs->connection->write); } static ngx_int_t -ngx_quic_control_flow(ngx_connection_t *c, uint64_t last) +ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last) { uint64_t len; - ngx_quic_stream_t *qs; + ngx_connection_t *pc; ngx_quic_connection_t *qc; - qs = c->quic; - qc = ngx_quic_get_connection(qs->parent); + pc = qs->parent; + qc = ngx_quic_get_connection(pc); if (last <= qs->recv_last) { return NGX_OK; @@ -1400,9 +1512,9 @@ ngx_quic_control_flow(ngx_connection_t * len = last - qs->recv_last; - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow control msd:%uL/%uL md:%uL/%uL", - last, qs->recv_max_data, qc->streams.recv_last + len, + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow control msd:%uL/%uL md:%uL/%uL", + qs->id, last, qs->recv_max_data, qc->streams.recv_last + len, qc->streams.recv_max_data); qs->recv_last += len; 
@@ -1426,14 +1538,12 @@ ngx_quic_control_flow(ngx_connection_t * static ngx_int_t -ngx_quic_update_flow(ngx_connection_t *c, uint64_t last) +ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last) { uint64_t len; ngx_connection_t *pc; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); @@ -1443,13 +1553,13 @@ ngx_quic_update_flow(ngx_connection_t *c len = last - qs->recv_offset; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update %uL", last); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow update %uL", qs->id, last); qs->recv_offset += len; if (qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { - if (ngx_quic_update_max_stream_data(c) != NGX_OK) { + if (ngx_quic_update_max_stream_data(qs) != NGX_OK) { return NGX_ERROR; } } @@ -1469,15 +1579,13 @@ ngx_quic_update_flow(ngx_connection_t *c static ngx_int_t -ngx_quic_update_max_stream_data(ngx_connection_t *c) +ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs) { uint64_t recv_max_data; ngx_connection_t *pc; ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); @@ -1493,8 +1601,9 @@ ngx_quic_update_max_stream_data(ngx_conn qs->recv_max_data = recv_max_data; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update msd:%uL", qs->recv_max_data); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow update msd:%uL", + qs->id, qs->recv_max_data); frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c --- a/src/http/v3/ngx_http_v3_uni.c +++ b/src/http/v3/ngx_http_v3_uni.c @@ -295,8 +295,6 @@ ngx_http_v3_uni_dummy_write_handler(ngx_ } -/* XXX async & buffered stream writes */ - ngx_connection_t * ngx_http_v3_create_push_stream(ngx_connection_t *c, uint64_t push_id) { From mdounin at mdounin.ru Tue 
Feb 1 14:48:18 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 01 Feb 2022 14:48:18 +0000 Subject: [nginx] Cache: fixed race in ngx_http_file_cache_forced_expire(). Message-ID: details: https://hg.nginx.org/nginx/rev/dd718d1cef3c branches: changeset: 8005:dd718d1cef3c user: Maxim Dounin date: Tue Feb 01 16:29:28 2022 +0300 description: Cache: fixed race in ngx_http_file_cache_forced_expire(). During configuration reload two cache managers might exist for a short time. If both tried to delete the same cache node, the "ignore long locked inactive cache entry" alert appeared in logs. Additionally, ngx_http_file_cache_forced_expire() might be also called by worker processes, with similar results. Fix is to ignore cache nodes being deleted, similarly to how it is done in ngx_http_file_cache_expire() since 3755:76e3a93821b1. This was somehow missed in 7002:ab199f0eb8e8, when ignoring long locked cache entries was introduced in ngx_http_file_cache_forced_expire(). diffstat: src/http/ngx_http_file_cache.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r c0a432c0301b -r dd718d1cef3c src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Wed Jan 26 20:40:00 2022 +0300 +++ b/src/http/ngx_http_file_cache.c Tue Feb 01 16:29:28 2022 +0300 @@ -1756,6 +1756,11 @@ ngx_http_file_cache_forced_expire(ngx_ht break; } + if (fcn->deleting) { + wait = 1; + break; + } + p = ngx_hex_dump(key, (u_char *) &fcn->node.key, sizeof(ngx_rbtree_key_t)); len = NGX_HTTP_CACHE_KEY_LEN - sizeof(ngx_rbtree_key_t); From pluknet at nginx.com Tue Feb 1 15:24:47 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 1 Feb 2022 18:24:47 +0300 Subject: [PATCH] HTTP/2: made it possible to flush response headers (ticket #1743) In-Reply-To: References: Message-ID: > On 27 Jan 2022, at 04:24, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1643225036 -10800 > # Wed Jan 26 22:23:56 2022 +0300 > # Node ID 
f76e0cf8e525996563e5f0092fa48a4fee873e93 > # Parent 56ead48cfe885e8b89b30017459bf621b21d95f5 > HTTP/2: made it possible to flush response headers (ticket #1743). > > Response headers can be buffered in the SSL buffer. But stream's fake > connection buffered flag did not reflect this, so any attempts to flush > the buffer without sending additional data were stopped by the write filter. > > It does not seem to be possible to reflect this in fc->buffered though, as > we never known if main connection's c->buffered corresponds to the particular > stream or not. As such, fc->buffered might prevent request finalization > due to sending data on some other stream. > > Fix is to implement handling of flush buffers when c->need_last_buf is set, > similarly to the existing last buffer handling. > > diff --git a/src/http/ngx_http_write_filter_module.c b/src/http/ngx_http_write_filter_module.c > --- a/src/http/ngx_http_write_filter_module.c > +++ b/src/http/ngx_http_write_filter_module.c > @@ -227,7 +227,7 @@ ngx_http_write_filter(ngx_http_request_t > > if (size == 0 > && !(c->buffered & NGX_LOWLEVEL_BUFFERED) > - && !(last && c->need_last_buf)) > + && !((last || flush) && c->need_last_buf)) > { > if (last || flush || sync) { > for (cl = r->out; cl; /* void */) { > diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c > --- a/src/http/v2/ngx_http_v2_filter_module.c > +++ b/src/http/v2/ngx_http_v2_filter_module.c > @@ -1815,7 +1815,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co > static ngx_inline ngx_int_t > ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) > { > - if (stream->queued == 0) { > + ngx_connection_t *c; > + > + c = stream->connection->connection; > + > + if (stream->queued == 0 && !c->buffered) { > fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > return NGX_OK; > } > Nitpicking: Shouldn't NGX_HTTP_V2_BUFFERED still be cleared regardless? 
if (stream->queued == 0) { fc->buffered &= ~NGX_HTTP_V2_BUFFERED; if (!c->buffered) { return NGX_OK; } } It doesn't seem to change anything, at a glance. However, this would save a contract that (fc->buffered & NGX_HTTP_V2_BUFFERED) is influenced by stream->queued. Otherwise, looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Wed Feb 2 03:26:53 2022 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Wed, 02 Feb 2022 06:26:53 +0300 Subject: [PATCH] HTTP/2: fixed closed_nodes overflow (ticket #1708) Message-ID: <0b272d5fd6bd0dcca6dc.1643772413@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1643772371 -10800 # Wed Feb 02 06:26:11 2022 +0300 # Node ID 0b272d5fd6bd0dcca6dcca8f27f8fcde33119159 # Parent dd718d1cef3c2ca2754a21176be3318b00be9e62 HTTP/2: fixed closed_nodes overflow (ticket #1708). With large http2_max_concurrent_streams or http2_max_concurrent_pushes, more than 255 ngx_http_v2_node_t structures might be allocated, eventually leading to h2c->closed_nodes overflow when closing corresponding streams. This will in turn result in additional allocations in ngx_http_v2_get_node_by_id(). While mostly harmless, it can result in excessive memory usage by a HTTP/2 connection, notably in configurations with many keepalive_requests allowed. Fix is to use ngx_uint_t for h2c->closed_nodes instead of unsigned:8. 
diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h --- a/src/http/v2/ngx_http_v2.h +++ b/src/http/v2/ngx_http_v2.h @@ -153,12 +153,12 @@ struct ngx_http_v2_connection_s { ngx_queue_t dependencies; ngx_queue_t closed; + ngx_uint_t closed_nodes; ngx_uint_t last_sid; ngx_uint_t last_push; time_t lingering_time; - unsigned closed_nodes:8; unsigned settings_ack:1; unsigned table_update:1; unsigned blocked:1; From mdounin at mdounin.ru Wed Feb 2 04:02:51 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 2 Feb 2022 07:02:51 +0300 Subject: [PATCH] HTTP/2: made it possible to flush response headers (ticket #1743) In-Reply-To: References: Message-ID: Hello! On Tue, Feb 01, 2022 at 06:24:47PM +0300, Sergey Kandaurov wrote: > > On 27 Jan 2022, at 04:24, Maxim Dounin wrote: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1643225036 -10800 > > # Wed Jan 26 22:23:56 2022 +0300 > > # Node ID f76e0cf8e525996563e5f0092fa48a4fee873e93 > > # Parent 56ead48cfe885e8b89b30017459bf621b21d95f5 > > HTTP/2: made it possible to flush response headers (ticket #1743). > > > > Response headers can be buffered in the SSL buffer. But stream's fake > > connection buffered flag did not reflect this, so any attempts to flush > > the buffer without sending additional data were stopped by the write filter. > > > > It does not seem to be possible to reflect this in fc->buffered though, as > > we never known if main connection's c->buffered corresponds to the particular > > stream or not. As such, fc->buffered might prevent request finalization > > due to sending data on some other stream. > > > > Fix is to implement handling of flush buffers when c->need_last_buf is set, > > similarly to the existing last buffer handling. 
> > > > diff --git a/src/http/ngx_http_write_filter_module.c b/src/http/ngx_http_write_filter_module.c > > --- a/src/http/ngx_http_write_filter_module.c > > +++ b/src/http/ngx_http_write_filter_module.c > > @@ -227,7 +227,7 @@ ngx_http_write_filter(ngx_http_request_t > > > > if (size == 0 > > && !(c->buffered & NGX_LOWLEVEL_BUFFERED) > > - && !(last && c->need_last_buf)) > > + && !((last || flush) && c->need_last_buf)) > > { > > if (last || flush || sync) { > > for (cl = r->out; cl; /* void */) { > > diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c > > --- a/src/http/v2/ngx_http_v2_filter_module.c > > +++ b/src/http/v2/ngx_http_v2_filter_module.c > > @@ -1815,7 +1815,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co > > static ngx_inline ngx_int_t > > ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) > > { > > - if (stream->queued == 0) { > > + ngx_connection_t *c; > > + > > + c = stream->connection->connection; > > + > > + if (stream->queued == 0 && !c->buffered) { > > fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > > return NGX_OK; > > } > > > > Nitpicking: > Shouldn't NGX_HTTP_V2_BUFFERED still be cleared regardless? > > if (stream->queued == 0) { > fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > > if (!c->buffered) { > return NGX_OK; > } > } > > It doesn't seem to change anything, at a glance. > However, this would save a contract that > (fc->buffered & NGX_HTTP_V2_BUFFERED) is influenced by stream->queued. No, it shouldn't. The fc->buffered is to be set before returning from ngx_http_v2_filter_send(), and it is properly set be the following code. > Otherwise, looks good. As discussed privately, it might be good idea to keep the code in line with UDP handling in the stream module. Here is an updated patch which introduces the c->need_flush_buf flag, and uses this flag instead of the c->type checking in the stream module. 
# HG changeset patch # User Maxim Dounin # Date 1643773980 -10800 # Wed Feb 02 06:53:00 2022 +0300 # Node ID 3a695d5d222262cd57c76124d7d0b3e3863a86c6 # Parent dd718d1cef3c2ca2754a21176be3318b00be9e62 HTTP/2: made it possible to flush response headers (ticket #1743). Response headers can be buffered in the SSL buffer. But stream's fake connection buffered flag did not reflect this, so any attempts to flush the buffer without sending additional data were stopped by the write filter. It does not seem to be possible to reflect this in fc->buffered though, as we never known if main connection's c->buffered corresponds to the particular stream or not. As such, fc->buffered might prevent request finalization due to sending data on some other stream. Fix is to implement handling of flush buffers when the c->need_flush_buf flag is set, similarly to the existing last buffer handling. The same flag is now used for UDP sockets in the stream module instead of explicit checking of c->type. diff --git a/src/core/ngx_connection.h b/src/core/ngx_connection.h --- a/src/core/ngx_connection.h +++ b/src/core/ngx_connection.h @@ -184,6 +184,7 @@ struct ngx_connection_s { unsigned tcp_nopush:2; /* ngx_connection_tcp_nopush_e */ unsigned need_last_buf:1; + unsigned need_flush_buf:1; #if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) unsigned busy_count:2; diff --git a/src/event/ngx_event_connect.c b/src/event/ngx_event_connect.c --- a/src/event/ngx_event_connect.c +++ b/src/event/ngx_event_connect.c @@ -179,6 +179,8 @@ ngx_event_connect_peer(ngx_peer_connecti c->recv = ngx_udp_recv; c->send = ngx_send; c->send_chain = ngx_udp_send_chain; + + c->need_flush_buf = 1; } c->log_error = pc->log_error; diff --git a/src/event/ngx_event_udp.c b/src/event/ngx_event_udp.c --- a/src/event/ngx_event_udp.c +++ b/src/event/ngx_event_udp.c @@ -246,6 +246,8 @@ ngx_event_recvmsg(ngx_event_t *ev) c->send = ngx_udp_send; c->send_chain = ngx_udp_send_chain; + c->need_flush_buf = 1; + c->log = log; c->pool->log = 
log; c->listening = ls; diff --git a/src/http/ngx_http_write_filter_module.c b/src/http/ngx_http_write_filter_module.c --- a/src/http/ngx_http_write_filter_module.c +++ b/src/http/ngx_http_write_filter_module.c @@ -227,7 +227,8 @@ ngx_http_write_filter(ngx_http_request_t if (size == 0 && !(c->buffered & NGX_LOWLEVEL_BUFFERED) - && !(last && c->need_last_buf)) + && !(last && c->need_last_buf) + && !(flush && c->need_flush_buf)) { if (last || flush || sync) { for (cl = r->out; cl; /* void */) { diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c +++ b/src/http/v2/ngx_http_v2_filter_module.c @@ -665,6 +665,7 @@ ngx_http_v2_header_filter(ngx_http_reque fc->send_chain = ngx_http_v2_send_chain; fc->need_last_buf = 1; + fc->need_flush_buf = 1; return ngx_http_v2_filter_send(fc, stream); } @@ -1815,7 +1816,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co static ngx_inline ngx_int_t ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) { - if (stream->queued == 0) { + ngx_connection_t *c; + + c = stream->connection->connection; + + if (stream->queued == 0 && !c->buffered) { fc->buffered &= ~NGX_HTTP_V2_BUFFERED; return NGX_OK; } diff --git a/src/stream/ngx_stream_write_filter_module.c b/src/stream/ngx_stream_write_filter_module.c --- a/src/stream/ngx_stream_write_filter_module.c +++ b/src/stream/ngx_stream_write_filter_module.c @@ -235,7 +235,7 @@ ngx_stream_write_filter(ngx_stream_sessi if (size == 0 && !(c->buffered & NGX_LOWLEVEL_BUFFERED) && !(last && c->need_last_buf) - && !(c->type == SOCK_DGRAM && flush)) + && !(flush && c->need_flush_buf)) { if (last || flush || sync) { for (cl = *out; cl; /* void */) { -- Maxim Dounin http://mdounin.ru/ From vl at nginx.com Wed Feb 2 10:55:01 2022 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 2 Feb 2022 13:55:01 +0300 Subject: [QUIC] padding of Initial packets Message-ID: # HG changeset patch # User Vladimir Homutov # 
Date 1643796973 -10800 # Wed Feb 02 13:16:13 2022 +0300 # Branch quic # Node ID fbfbcf66990e8964bcf308f3869f37d1a1acceeb # Parent 8c6645ecaeb6cbf27976fd9035440bfcab943117 QUIC: fixed padding of initial packets in case of limited path. Previously, non-padded initial packet could be sent as a result of the following situation: - initial queue is not empty (so padding to 1200 is required) - handhsake queue is not empty (so padding is to be added after h/s packet) - path is limited If serializing handshake packet would violate path limit, such packet was omitted, and the non-padded initial packet was sent. The fix is to avoid sending the packet at all in such case. This follows the original intention introduced in c5155a0cb12f. diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -158,7 +158,14 @@ ngx_quic_create_datagrams(ngx_connection ? NGX_QUIC_MIN_INITIAL_SIZE - (p - dst) : 0; if (min > len) { - continue; + /* padding can't be applied - avoid sending the packet */ + + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { + ctx = &qc->send_ctx[i]; + ngx_quic_revert_send(c, ctx, preserved_pnum[i]); + } + + return NGX_OK; } n = ngx_quic_output_packet(c, ctx, p, len, min); From pluknet at nginx.com Wed Feb 2 11:06:52 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 2 Feb 2022 14:06:52 +0300 Subject: [QUIC] padding of Initial packets In-Reply-To: References: Message-ID: > On 2 Feb 2022, at 13:55, Vladimir Homutov wrote: > > # HG changeset patch > # User Vladimir Homutov > # Date 1643796973 -10800 > # Wed Feb 02 13:16:13 2022 +0300 > # Branch quic > # Node ID fbfbcf66990e8964bcf308f3869f37d1a1acceeb > # Parent 8c6645ecaeb6cbf27976fd9035440bfcab943117 > QUIC: fixed padding of initial packets in case of limited path. 
> > Previously, non-padded initial packet could be sent as a result of the > following situation: > > - initial queue is not empty (so padding to 1200 is required) > - handhsake queue is not empty (so padding is to be added after h/s packet) handshake > - path is limited > > If serializing handshake packet would violate path limit, such packet was > omitted, and the non-padded initial packet was sent. > > The fix is to avoid sending the packet at all in such case. This follows the > original intention introduced in c5155a0cb12f. > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -158,7 +158,14 @@ ngx_quic_create_datagrams(ngx_connection > ? NGX_QUIC_MIN_INITIAL_SIZE - (p - dst) : 0; > > if (min > len) { > - continue; > + /* padding can't be applied - avoid sending the packet */ > + > + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { > + ctx = &qc->send_ctx[i]; > + ngx_quic_revert_send(c, ctx, preserved_pnum[i]); this could be simplified to reduce ctx variable: ngx_quic_revert_send(c, &qc->send_ctx[i], preserved_pnum[i]); but it won't fit into 80 line, so that's good just as well > + } > + > + return NGX_OK; > } > > n = ngx_quic_output_packet(c, ctx, p, len, min); > -- Sergey Kandaurov From pluknet at nginx.com Wed Feb 2 12:05:07 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 02 Feb 2022 15:05:07 +0300 Subject: [PATCH] QUIC: do not arm loss detection timer if nothing was sent Message-ID: <768445d1ba6e2bce9001.1643803507@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1643803485 -10800 # Wed Feb 02 15:04:45 2022 +0300 # Branch quic # Node ID 768445d1ba6e2bce9001704c52b516ad421ae776 # Parent cd8018bc81a52ca7de2eb4e779dfd574c8a661a2 QUIC: do not arm loss detection timer if nothing was sent. Notably, this became quite practicable after the recent fix in cd8018bc81a5. 
diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -109,7 +109,9 @@ ngx_quic_output(ngx_connection_t *c) ngx_add_timer(c->read, qc->tp.max_idle_timeout); } - ngx_quic_set_lost_timer(c); + if (in_flight != cg->in_flight) { + ngx_quic_set_lost_timer(c); + } return NGX_OK; } From vl at nginx.com Wed Feb 2 12:21:10 2022 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 2 Feb 2022 15:21:10 +0300 Subject: [PATCH] QUIC: do not arm loss detection timer if nothing was sent In-Reply-To: <768445d1ba6e2bce9001.1643803507@enoparse.local> References: <768445d1ba6e2bce9001.1643803507@enoparse.local> Message-ID: On Wed, Feb 02, 2022 at 03:05:07PM +0300, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1643803485 -10800 > # Wed Feb 02 15:04:45 2022 +0300 > # Branch quic > # Node ID 768445d1ba6e2bce9001704c52b516ad421ae776 > # Parent cd8018bc81a52ca7de2eb4e779dfd574c8a661a2 > QUIC: do not arm loss detection timer if nothing was sent. > > Notably, this became quite practicable after the recent fix in cd8018bc81a5. > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -109,7 +109,9 @@ ngx_quic_output(ngx_connection_t *c) > ngx_add_timer(c->read, qc->tp.max_idle_timeout); > } > > - ngx_quic_set_lost_timer(c); > + if (in_flight != cg->in_flight) { > + ngx_quic_set_lost_timer(c); > + } > > return NGX_OK; > } > Instead of adding one more check, I would invert condition and test if we need to set any timers first, and then arm whatever needed; This would simplify conditions and make logic simpler; i.e. 
something like: diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -104,7 +104,12 @@ ngx_quic_output(ngx_connection_t *c) return NGX_ERROR; } - if (in_flight != cg->in_flight && !qc->send_timer_set && !qc->closing) { + if (in_flight == cg->in_flight || qc->closing) { + /* no ack-eliciting data was sent or we are done */ + return NGX_OK; + } + + if (!qc->send_timer_set) { qc->send_timer_set = 1; ngx_add_timer(c->read, qc->tp.max_idle_timeout); } From pluknet at nginx.com Wed Feb 2 16:36:15 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 2 Feb 2022 19:36:15 +0300 Subject: [PATCH] HTTP/2: made it possible to flush response headers (ticket #1743) In-Reply-To: References: Message-ID: <271077E9-49C8-42DF-B675-8150BD2424C0@nginx.com> > On 2 Feb 2022, at 07:02, Maxim Dounin wrote: > > Hello! > > [..] > As discussed privately, it might be good idea to keep the code in > line with UDP handling in the stream module. Here is an updated > patch which introduces the c->need_flush_buf flag, and uses > this flag instead of the c->type checking in the stream module. > > # HG changeset patch > # User Maxim Dounin > # Date 1643773980 -10800 > # Wed Feb 02 06:53:00 2022 +0300 > # Node ID 3a695d5d222262cd57c76124d7d0b3e3863a86c6 > # Parent dd718d1cef3c2ca2754a21176be3318b00be9e62 > HTTP/2: made it possible to flush response headers (ticket #1743). > > Response headers can be buffered in the SSL buffer. But stream's fake > connection buffered flag did not reflect this, so any attempts to flush > the buffer without sending additional data were stopped by the write filter. > > It does not seem to be possible to reflect this in fc->buffered though, as > we never known if main connection's c->buffered corresponds to the particular > stream or not. 
As such, fc->buffered might prevent request finalization > due to sending data on some other stream. > > Fix is to implement handling of flush buffers when the c->need_flush_buf > flag is set, similarly to the existing last buffer handling. The same > flag is now used for UDP sockets in the stream module instead of explicit > checking of c->type. > > diff --git a/src/core/ngx_connection.h b/src/core/ngx_connection.h > --- a/src/core/ngx_connection.h > +++ b/src/core/ngx_connection.h > @@ -184,6 +184,7 @@ struct ngx_connection_s { > unsigned tcp_nopush:2; /* ngx_connection_tcp_nopush_e */ > > unsigned need_last_buf:1; > + unsigned need_flush_buf:1; > > #if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) > unsigned busy_count:2; > diff --git a/src/event/ngx_event_connect.c b/src/event/ngx_event_connect.c > --- a/src/event/ngx_event_connect.c > +++ b/src/event/ngx_event_connect.c > @@ -179,6 +179,8 @@ ngx_event_connect_peer(ngx_peer_connecti > c->recv = ngx_udp_recv; > c->send = ngx_send; > c->send_chain = ngx_udp_send_chain; > + > + c->need_flush_buf = 1; > } > > c->log_error = pc->log_error; > diff --git a/src/event/ngx_event_udp.c b/src/event/ngx_event_udp.c > --- a/src/event/ngx_event_udp.c > +++ b/src/event/ngx_event_udp.c > @@ -246,6 +246,8 @@ ngx_event_recvmsg(ngx_event_t *ev) > c->send = ngx_udp_send; > c->send_chain = ngx_udp_send_chain; > > + c->need_flush_buf = 1; > + > c->log = log; > c->pool->log = log; > c->listening = ls; > diff --git a/src/http/ngx_http_write_filter_module.c b/src/http/ngx_http_write_filter_module.c > --- a/src/http/ngx_http_write_filter_module.c > +++ b/src/http/ngx_http_write_filter_module.c > @@ -227,7 +227,8 @@ ngx_http_write_filter(ngx_http_request_t > > if (size == 0 > && !(c->buffered & NGX_LOWLEVEL_BUFFERED) > - && !(last && c->need_last_buf)) > + && !(last && c->need_last_buf) > + && !(flush && c->need_flush_buf)) > { > if (last || flush || sync) { > for (cl = r->out; cl; /* void */) { > diff --git 
a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c > --- a/src/http/v2/ngx_http_v2_filter_module.c > +++ b/src/http/v2/ngx_http_v2_filter_module.c > @@ -665,6 +665,7 @@ ngx_http_v2_header_filter(ngx_http_reque > > fc->send_chain = ngx_http_v2_send_chain; > fc->need_last_buf = 1; > + fc->need_flush_buf = 1; > > return ngx_http_v2_filter_send(fc, stream); > } > @@ -1815,7 +1816,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co > static ngx_inline ngx_int_t > ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) > { > - if (stream->queued == 0) { > + ngx_connection_t *c; > + > + c = stream->connection->connection; > + > + if (stream->queued == 0 && !c->buffered) { > fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > return NGX_OK; > } > diff --git a/src/stream/ngx_stream_write_filter_module.c b/src/stream/ngx_stream_write_filter_module.c > --- a/src/stream/ngx_stream_write_filter_module.c > +++ b/src/stream/ngx_stream_write_filter_module.c > @@ -235,7 +235,7 @@ ngx_stream_write_filter(ngx_stream_sessi > if (size == 0 > && !(c->buffered & NGX_LOWLEVEL_BUFFERED) > && !(last && c->need_last_buf) > - && !(c->type == SOCK_DGRAM && flush)) > + && !(flush && c->need_flush_buf)) > { > if (last || flush || sync) { > for (cl = *out; cl; /* void */) { > Thanks for the update, it looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Wed Feb 2 22:45:28 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 02 Feb 2022 22:45:28 +0000 Subject: [nginx] HTTP/2: made it possible to flush response headers (ticket #1743). Message-ID: details: https://hg.nginx.org/nginx/rev/32b0ba4855a6 branches: changeset: 8006:32b0ba4855a6 user: Maxim Dounin date: Thu Feb 03 01:44:38 2022 +0300 description: HTTP/2: made it possible to flush response headers (ticket #1743). Response headers can be buffered in the SSL buffer. 
But stream's fake connection buffered flag did not reflect this, so any attempts to flush the buffer without sending additional data were stopped by the write filter. It does not seem to be possible to reflect this in fc->buffered though, as we never known if main connection's c->buffered corresponds to the particular stream or not. As such, fc->buffered might prevent request finalization due to sending data on some other stream. Fix is to implement handling of flush buffers when the c->need_flush_buf flag is set, similarly to the existing last buffer handling. The same flag is now used for UDP sockets in the stream module instead of explicit checking of c->type. diffstat: src/core/ngx_connection.h | 1 + src/event/ngx_event_connect.c | 2 ++ src/event/ngx_event_udp.c | 2 ++ src/http/ngx_http_write_filter_module.c | 3 ++- src/http/v2/ngx_http_v2_filter_module.c | 7 ++++++- src/stream/ngx_stream_write_filter_module.c | 2 +- 6 files changed, 14 insertions(+), 3 deletions(-) diffs (84 lines): diff -r dd718d1cef3c -r 32b0ba4855a6 src/core/ngx_connection.h --- a/src/core/ngx_connection.h Tue Feb 01 16:29:28 2022 +0300 +++ b/src/core/ngx_connection.h Thu Feb 03 01:44:38 2022 +0300 @@ -184,6 +184,7 @@ struct ngx_connection_s { unsigned tcp_nopush:2; /* ngx_connection_tcp_nopush_e */ unsigned need_last_buf:1; + unsigned need_flush_buf:1; #if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) unsigned busy_count:2; diff -r dd718d1cef3c -r 32b0ba4855a6 src/event/ngx_event_connect.c --- a/src/event/ngx_event_connect.c Tue Feb 01 16:29:28 2022 +0300 +++ b/src/event/ngx_event_connect.c Thu Feb 03 01:44:38 2022 +0300 @@ -179,6 +179,8 @@ ngx_event_connect_peer(ngx_peer_connecti c->recv = ngx_udp_recv; c->send = ngx_send; c->send_chain = ngx_udp_send_chain; + + c->need_flush_buf = 1; } c->log_error = pc->log_error; diff -r dd718d1cef3c -r 32b0ba4855a6 src/event/ngx_event_udp.c --- a/src/event/ngx_event_udp.c Tue Feb 01 16:29:28 2022 +0300 +++ b/src/event/ngx_event_udp.c Thu Feb 03 01:44:38 
2022 +0300 @@ -246,6 +246,8 @@ ngx_event_recvmsg(ngx_event_t *ev) c->send = ngx_udp_send; c->send_chain = ngx_udp_send_chain; + c->need_flush_buf = 1; + c->log = log; c->pool->log = log; c->listening = ls; diff -r dd718d1cef3c -r 32b0ba4855a6 src/http/ngx_http_write_filter_module.c --- a/src/http/ngx_http_write_filter_module.c Tue Feb 01 16:29:28 2022 +0300 +++ b/src/http/ngx_http_write_filter_module.c Thu Feb 03 01:44:38 2022 +0300 @@ -227,7 +227,8 @@ ngx_http_write_filter(ngx_http_request_t if (size == 0 && !(c->buffered & NGX_LOWLEVEL_BUFFERED) - && !(last && c->need_last_buf)) + && !(last && c->need_last_buf) + && !(flush && c->need_flush_buf)) { if (last || flush || sync) { for (cl = r->out; cl; /* void */) { diff -r dd718d1cef3c -r 32b0ba4855a6 src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c Tue Feb 01 16:29:28 2022 +0300 +++ b/src/http/v2/ngx_http_v2_filter_module.c Thu Feb 03 01:44:38 2022 +0300 @@ -665,6 +665,7 @@ ngx_http_v2_header_filter(ngx_http_reque fc->send_chain = ngx_http_v2_send_chain; fc->need_last_buf = 1; + fc->need_flush_buf = 1; return ngx_http_v2_filter_send(fc, stream); } @@ -1815,7 +1816,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co static ngx_inline ngx_int_t ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) { - if (stream->queued == 0) { + ngx_connection_t *c; + + c = stream->connection->connection; + + if (stream->queued == 0 && !c->buffered) { fc->buffered &= ~NGX_HTTP_V2_BUFFERED; return NGX_OK; } diff -r dd718d1cef3c -r 32b0ba4855a6 src/stream/ngx_stream_write_filter_module.c --- a/src/stream/ngx_stream_write_filter_module.c Tue Feb 01 16:29:28 2022 +0300 +++ b/src/stream/ngx_stream_write_filter_module.c Thu Feb 03 01:44:38 2022 +0300 @@ -235,7 +235,7 @@ ngx_stream_write_filter(ngx_stream_sessi if (size == 0 && !(c->buffered & NGX_LOWLEVEL_BUFFERED) && !(last && c->need_last_buf) - && !(c->type == SOCK_DGRAM && flush)) + && !(flush && c->need_flush_buf)) { if (last || 
flush || sync) { for (cl = *out; cl; /* void */) { From pluknet at nginx.com Thu Feb 3 17:49:58 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 3 Feb 2022 20:49:58 +0300 Subject: [PATCH] HTTP/2: fixed closed_nodes overflow (ticket #1708) In-Reply-To: <0b272d5fd6bd0dcca6dc.1643772413@vm-bsd.mdounin.ru> References: <0b272d5fd6bd0dcca6dc.1643772413@vm-bsd.mdounin.ru> Message-ID: <75F09159-72C4-4720-A60D-ABF2AEDD95C1@nginx.com> > On 2 Feb 2022, at 06:26, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1643772371 -10800 > # Wed Feb 02 06:26:11 2022 +0300 > # Node ID 0b272d5fd6bd0dcca6dcca8f27f8fcde33119159 > # Parent dd718d1cef3c2ca2754a21176be3318b00be9e62 > HTTP/2: fixed closed_nodes overflow (ticket #1708). > > With large http2_max_concurrent_streams or http2_max_concurrent_pushes, more > than 255 ngx_http_v2_node_t structures might be allocated, eventually leading > to h2c->closed_nodes overflow when closing corresponding streams. This will > in turn result in additional allocations in ngx_http_v2_get_node_by_id(). > > While mostly harmless, it can result in excessive memory usage by a HTTP/2 > connection, notably in configurations with many keepalive_requests allowed. > Fix is to use ngx_uint_t for h2c->closed_nodes instead of unsigned:8. > > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > --- a/src/http/v2/ngx_http_v2.h > +++ b/src/http/v2/ngx_http_v2.h > @@ -153,12 +153,12 @@ struct ngx_http_v2_connection_s { > ngx_queue_t dependencies; > ngx_queue_t closed; > > + ngx_uint_t closed_nodes; > ngx_uint_t last_sid; > ngx_uint_t last_push; > > time_t lingering_time; > > - unsigned closed_nodes:8; > unsigned settings_ack:1; > unsigned table_update:1; > unsigned blocked:1; > Looks good. A minor point is that now h2c will typically increase by 8 bytes. 
-- Sergey Kandaurov From mdounin at mdounin.ru Thu Feb 3 19:48:36 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 03 Feb 2022 19:48:36 +0000 Subject: [nginx] HTTP/2: fixed closed_nodes overflow (ticket #1708). Message-ID: details: https://hg.nginx.org/nginx/rev/011f5ebdb928 branches: changeset: 8007:011f5ebdb928 user: Maxim Dounin date: Thu Feb 03 22:46:01 2022 +0300 description: HTTP/2: fixed closed_nodes overflow (ticket #1708). With large http2_max_concurrent_streams or http2_max_concurrent_pushes, more than 255 ngx_http_v2_node_t structures might be allocated, eventually leading to h2c->closed_nodes overflow when closing corresponding streams. This will in turn result in additional allocations in ngx_http_v2_get_node_by_id(). While mostly harmless, it can result in excessive memory usage by a HTTP/2 connection, notably in configurations with many keepalive_requests allowed. Fix is to use ngx_uint_t for h2c->closed_nodes instead of unsigned:8. diffstat: src/http/v2/ngx_http_v2.h | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (17 lines): diff -r 32b0ba4855a6 -r 011f5ebdb928 src/http/v2/ngx_http_v2.h --- a/src/http/v2/ngx_http_v2.h Thu Feb 03 01:44:38 2022 +0300 +++ b/src/http/v2/ngx_http_v2.h Thu Feb 03 22:46:01 2022 +0300 @@ -153,12 +153,12 @@ struct ngx_http_v2_connection_s { ngx_queue_t dependencies; ngx_queue_t closed; + ngx_uint_t closed_nodes; ngx_uint_t last_sid; ngx_uint_t last_push; time_t lingering_time; - unsigned closed_nodes:8; unsigned settings_ack:1; unsigned table_update:1; unsigned blocked:1; From pluknet at nginx.com Fri Feb 4 10:40:34 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 04 Feb 2022 10:40:34 +0000 Subject: [nginx] Year 2022. Message-ID: details: https://hg.nginx.org/nginx/rev/1add55d23652 branches: changeset: 8008:1add55d23652 user: Sergey Kandaurov date: Fri Feb 04 13:29:31 2022 +0300 description: Year 2022. 
diffstat: docs/text/LICENSE | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (11 lines): diff -r 011f5ebdb928 -r 1add55d23652 docs/text/LICENSE --- a/docs/text/LICENSE Thu Feb 03 22:46:01 2022 +0300 +++ b/docs/text/LICENSE Fri Feb 04 13:29:31 2022 +0300 @@ -1,6 +1,6 @@ /* * Copyright (C) 2002-2021 Igor Sysoev - * Copyright (C) 2011-2021 Nginx, Inc. + * Copyright (C) 2011-2022 Nginx, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without From vl at nginx.com Fri Feb 4 13:56:23 2022 From: vl at nginx.com (Vladimir Homutov) Date: Fri, 4 Feb 2022 16:56:23 +0300 Subject: [PATCH] QUIC: stream lingering In-Reply-To: References: Message-ID: On Tue, Feb 01, 2022 at 04:39:59PM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1643722727 -10800 > # Tue Feb 01 16:38:47 2022 +0300 > # Branch quic > # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 > # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 > QUIC: stream lingering. > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > can persist after connection is closed by application. During this period, > server is expecting stream final size from client for correct flow control. > Also, buffered output is sent to client as more flow control credit is granted. > [..] > +static ngx_int_t > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > +{ > + size_t limit, len; > + ngx_uint_t last; > + ngx_chain_t *out, *cl; > + ngx_quic_frame_t *frame; > + ngx_connection_t *pc; > + ngx_quic_connection_t *qc; > + > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > + return NGX_OK; > + } > + > + pc = qs->parent; > + qc = ngx_quic_get_connection(pc); > + > + limit = ngx_quic_max_stream_flow(qs); > + last = 0; > + > + out = ngx_quic_read_chain(pc, &qs->out, limit); > + if (out == NGX_CHAIN_ERROR) { > + return NGX_ERROR; > + } > + > + len = 0; > + last = 0; this assignment looks duplicate. [..] 
> +static ngx_int_t > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > +{ > ngx_connection_t *pc; > ngx_quic_frame_t *frame; > - ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > pc = qs->parent; > qc = ngx_quic_get_connection(pc); > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic stream id:0x%xL cleanup", qs->id); > + if (!qc->closing) { > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > + { so basically this are the states where we need to wait for FIN? and thus avoid closing till we get it. I would add a comment here. [..] > + if (qs->connection == NULL) { > + return ngx_quic_close_stream(qs); > + } > + > ngx_quic_set_event(qs->connection->write); this pattern - check connection, close if NULL and set event seem to repeat. Maybe it's worth to try to put this check/action into ngx_quic_set_event somehow ? we could instead have set_read_event/set_write_event maybe. > +static ngx_int_t > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > + [..] > + if (len == 0 && !last) { > + return NGX_OK; > + } > + > + frame = ngx_quic_alloc_frame(pc); > + if (frame == NULL) { > + return NGX_ERROR; > + } > + > + frame = ngx_quic_alloc_frame(pc); > + if (frame == NULL) { > + return NGX_ERROR; > + } one more dup here. Overal, it looks good, but the testing revealed another issue: with big buffer sizes we run into issue of too long chains in ngx_quic_write_chain(). As discussed, this certainly needs optimization - probably adding some pointer to the end to facilitate appending, or something else. From jiri.setnicka at cdn77.com Mon Feb 7 11:16:00 2022 From: jiri.setnicka at cdn77.com (=?UTF-8?B?SmnFmcOtIFNldG5pxI1rYQ==?=) Date: Mon, 7 Feb 2022 12:16:00 +0100 Subject: [PATCH 00 of 15] Serve all requests from single tempfile In-Reply-To: References: Message-ID: Hello! 
> We developed the proxy_cache_tempfile mechanism, which acts similarly to > the proxy_cache_lock, but instead of locking other requests waiting for > the file completion, we open the tempfile used by the primary request > and periodically serve parts of it to the waiting requests. > > [...] > > We tested this feature thoroughly for the last few months and we use > it already in part of our infrastructure without noticing any negative > impact, We noticed only a very small increase in memory usage and a > minimal increase in CPU and disk io usage (which corresponds with the > increased throughput of the server). > > We also did some synthetic benchmarks where we compared vanilla nginx > and our patched version with and without cache lock and with cache > tempfiles. Results of the benchmarks, charts, and scripts we used for it > are available on my Github: > > https://github.com/setnicka/nginx-tempfiles-benchmark Highlighting this. Have you had already time to look at the proposed changes? What do you think about them? I am mostly interested in if there isn't some obvious fundamental misconception which I forget. As I wrote before we already use nginx with this patch in small part of our infrastructure without noticing any negative impact. But our usecase is quite specific and maybe there could be some hidden flaw in general use. Jiří Setnička CDN77 From jiri.setnicka at cdn77.com Mon Feb 7 11:18:52 2022 From: jiri.setnicka at cdn77.com (=?UTF-8?B?SmnFmcOtIFNldG5pxI1rYQ==?=) Date: Mon, 7 Feb 2022 12:18:52 +0100 Subject: [PATCH 13 of 15] Tempfiles: Skip cached file if there is already newer tempfile In-Reply-To: References: <5e64af4c94860cd5cf4b.1643387525@pathfinder> Message-ID: Hello! > Thanks for sharing patches. It's interesting for me and I'm going to > test it soon. Did you already had some time to test it? 
I am interested in you thoughts and results :) > For this particular patch I would suggest to reduce the scope of mutex > locking > and remove it when "serve_tempfile" is not configured. See my version > below: > diff --git a/src/http/ngx_http_file_cache.c > b/src/http/ngx_http_file_cache.c > index db379450..97982aed 100644 > --- a/src/http/ngx_http_file_cache.c > +++ b/src/http/ngx_http_file_cache.c > @@ -460,6 +460,22 @@ngx_http_file_cache_open(ngx_http_request_t *r) >         goto done; >     } > > +    if (c->serve_tempfile) { > +        ngx_shmtx_lock(&cache->shpool->mutex); > + > +        if (c->node->updating) { > +        /* Do not try old cached file, jump directly to cache_lock > and use tempfile */ > +            test = 0; > +        } > + > +        ngx_shmtx_unlock(&cache->shpool->mutex); > + > +        if (!test) { > +            rv = NGX_DECLINED; > +            goto done; > +        } > +    } > + >     rc = ngx_http_file_cache_open_file(r, &c->file.name > ); >     if (rc != NGX_DECLINED) { >         return rc; Yes, this is of course better, thank you. Jiří Setnička -------------- next part -------------- An HTML attachment was scrubbed... URL: From arut at nginx.com Mon Feb 7 11:31:43 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 7 Feb 2022 14:31:43 +0300 Subject: [PATCH 00 of 15] Serve all requests from single tempfile In-Reply-To: References: Message-ID: <20220207113143.jzskvfnm7uvamn2u@Romans-MacBook-Pro.local> Hello, On Fri, Jan 28, 2022 at 05:31:52PM +0100, Jiří Setnička via nginx-devel wrote: > Hello! > > Over the last few months, we (a small team of developers including me > and Jan Prachař, both from CDN77) developed a missing feature for the > proxy caching in Nginx. We are happy to share this feature with the > community in the following patch series. 
> > We serve a large number of files to an immense number of clients and > often multiple clients want the same file at the very same time - > especially when it came to streaming (when a file is crafted on the > upstream in real-time and getting it could take seconds). > > Previously there were two options in Nginx when using proxy caching: > > * pass all incoming requests to the origin > * use proxy_cache_lock feature, pass only the first request (served in > real-time) and let other requests wait until the first request > completion > > We didn't like any of these options (the first one effectively disables > CDN and the second one is unusable for streaming). We considered using > Varnish, which solves this problem better, but we are very happy with > the Nginx infrastructure we have. Thus we came with the third option. > > We developed the proxy_cache_tempfile mechanism, which acts similarly to > the proxy_cache_lock, but instead of locking other requests waiting for > the file completion, we open the tempfile used by the primary request > and periodically serve parts of it to the waiting requests. > > Because there may be multiple tempfiles for the same file (for example > when the file expires before it is fully downloaded), we use shared > memory per cache with `ngx_http_file_cache_tf_node_t` for each created > tempfiles to synchronize all workers. When a new request is passed to > the origin, we record its tempfile number and when another request is > received, we try to open tempfile with this number and serve from it. > When tempfile is already used for some secondary request, it sticks with > this same tempfile until its completion. > > To accomplish this we rely on the POSIX filesystem feature, when you can > open file and retain its file descriptor even when it is moved to a new > location (on the same filesystem). I'm afraid that this would be hard to > accomplish on Windows and this feature will be non-Windows only. 
> > We tested this feature thoroughly for the last few months and we use > it already in part of our infrastructure without noticing any negative > impact, We noticed only a very small increase in memory usage and a > minimal increase in CPU and disk io usage (which corresponds with the > increased throughput of the server). > > We also did some synthetic benchmarks where we compared vanilla nginx > and our patched version with and without cache lock and with cache > tempfiles. Results of the benchmarks, charts, and scripts we used for it > are available on my Github: > > https://github.com/setnicka/nginx-tempfiles-benchmark > > It should work also for fastcgi, uwsgi, and scgi caches (as it uses > internally the same mechanism), but we didn't do testing of these. > > New config: > > * proxy_cache_tempfile on; -- activate the whole tempfile logic > * proxy_cache_tempfile_timeout 5s; -- how long to wait for tempfile before 504 > * proxy_cache_tempfile_loop 50ms; -- loop time for check tempfiles > (ans same for fastcgi_cache, uwsgi_cache and scgi_cache) > > New option for proxy_cache_path: tf_zone=name:size (defaults to key zone > name with _tf suffix and 10M size). It creates a shared memory zone used > to store tempfiles nodes. > > We would be very grateful for any reviews and other testing. > > Jiří Setnička > CDN77 > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org Thanks for sharing your work. Indeed, nginx currently lacks a good solution for serving a file that's being downloaded from upstream. We tried to address this issue a few years ago. Our solution was similar to yours, but instead of sharing the temp file between workers, we moved the temp file to its destination right after writing the header. A new bit was added to the header signalling that this file is being updated. 
The biggest issue with this kind of solutions is how we wait for updates in a file. We believe that polling a file with a given time interval is not a perfect approach, even though nginx does that for cache locks. Some systems provide ways to avoid this. For example, BSD systems have kqueue which allows to wait for file updates. On Linux inotify can do similar things, but the number of watches is limited. Another approach would be to create an inter-worker messaging system for signalling file updates. It's good to know the solution works for you. Please keep us posted about future improvements especially the ones which would avoid polling and decrease complexity. -- Roman Arutyunyan From jiri.setnicka at cdn77.com Mon Feb 7 12:27:15 2022 From: jiri.setnicka at cdn77.com (=?UTF-8?B?SmnFmcOtIFNldG5pxI1rYQ==?=) Date: Mon, 7 Feb 2022 13:27:15 +0100 Subject: [PATCH 00 of 15] Serve all requests from single tempfile In-Reply-To: <20220207113143.jzskvfnm7uvamn2u@Romans-MacBook-Pro.local> References: <20220207113143.jzskvfnm7uvamn2u@Romans-MacBook-Pro.local> Message-ID: <8c1c3901-eb36-454a-db28-d0b058d7ac7e@cdn77.com> Hello, > Thanks for sharing your work. Indeed, nginx currently lacks a good solution > for serving a file that's being downloaded from upstream. We tried to address > this issue a few years ago. Our solution was similar to yours, but instead > of sharing the temp file between workers, we moved the temp file to its > destination right after writing the header. A new bit was added to the header > signalling that this file is being updated. > > The biggest issue with this kind of solutions is how we wait for updates in > a file. We believe that polling a file with a given time interval is not a > perfect approach, even though nginx does that for cache locks. 
polling is done only on the ngx_http_file_cache_tf_node_t struct in the shared memory (see patch 09 of 15, where c->length is updated from c->tf_node->length and then this length is compared with c->body_sent_bytes), not on the file itself. Length in the tf_node is updated with each write from the primary request (see patch 05 of 15). It is better than polling individual files, but I agree it is still polling, which isn't great. > [...] > Another approach would be to create an > inter-worker messaging system for signalling file updates. We were thinking about creating something like that but we buried this idea because it seems quite complex to do it right and reliable. And polling the tf_node in the shared memory (with very low proxy_cache_tempfile_loop) works sufficiently good. > It's good to know the solution works for you. Please keep us posted about > future improvements especially the ones which would avoid polling and decrease > complexity. We would be happy to get this patch to the mainline nginx in the future, so that all nginx users could benefit from it. We will be thinking about avoiding polling and implementing some inter-worker messaging, but it may be some time, because it seems quite complex. Could you share some hints about how do you thing it would be best to implement it in the worker's event loop? 
Jiří Setnička CDN77 From arut at nginx.com Mon Feb 7 14:16:17 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 7 Feb 2022 17:16:17 +0300 Subject: [PATCH] QUIC: stream lingering In-Reply-To: References: Message-ID: <20220207141617.av7ft5sycecoce6r@Romans-MacBook-Pro.local> Hi, On Fri, Feb 04, 2022 at 04:56:23PM +0300, Vladimir Homutov wrote: > On Tue, Feb 01, 2022 at 04:39:59PM +0300, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1643722727 -10800 > > # Tue Feb 01 16:38:47 2022 +0300 > > # Branch quic > > # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 > > # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 > > QUIC: stream lingering. > > > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > > can persist after connection is closed by application. During this period, > > server is expecting stream final size from client for correct flow control. > > Also, buffered output is sent to client as more flow control credit is granted. > > > [..] > > > +static ngx_int_t > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > +{ > > + size_t limit, len; > > + ngx_uint_t last; > > + ngx_chain_t *out, *cl; > > + ngx_quic_frame_t *frame; > > + ngx_connection_t *pc; > > + ngx_quic_connection_t *qc; > > + > > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > > + return NGX_OK; > > + } > > + > > + pc = qs->parent; > > + qc = ngx_quic_get_connection(pc); > > + > > + limit = ngx_quic_max_stream_flow(qs); > > + last = 0; > > + > > + out = ngx_quic_read_chain(pc, &qs->out, limit); > > + if (out == NGX_CHAIN_ERROR) { > > + return NGX_ERROR; > > + } > > + > > + len = 0; > > + last = 0; > > this assignment looks duplicate. Thanks, fixed. > [..] 
> > > +static ngx_int_t > > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > > +{ > > ngx_connection_t *pc; > > ngx_quic_frame_t *frame; > > - ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > pc = qs->parent; > > qc = ngx_quic_get_connection(pc); > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic stream id:0x%xL cleanup", qs->id); > > + if (!qc->closing) { > > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > > + { > > so basically this are the states where we need to wait for FIN? > and thus avoid closing till we get it. > I would add a comment here. On the receiving end we wait either for fin or for reset to have final size. On the sending end we wait for everything that's buffered to be sent. Added a comment about that. > [..] > > + if (qs->connection == NULL) { > > + return ngx_quic_close_stream(qs); > > + } > > + > > ngx_quic_set_event(qs->connection->write); > > this pattern - check connection, close if NULL and set event seem to > repeat. Maybe it's worth to try to put this check/action into > ngx_quic_set_event somehow ? we could instead have > set_read_event/set_write_event maybe. I thought about this too, but it's not always that simple. And even if it was, the new function/macro would have unclear semantics. Let's just remember this as a possible future optimiation. > > +static ngx_int_t > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > + > [..] > > + if (len == 0 && !last) { > > + return NGX_OK; > > + } > > + > > + frame = ngx_quic_alloc_frame(pc); > > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > + > > + frame = ngx_quic_alloc_frame(pc); > > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > one more dup here. Yes, thanks. 
> Overal, it looks good, but the testing revealed another issue: with big > buffer sizes we run into issue of too long chains in ngx_quic_write_chain(). > As discussed, this certainly needs optimization - probably adding some > pointer to the end to facilitate appending, or something else. It's true ngx_quic_write_chain() needs to be optimized. When the buffered chain is big, it takes too much time to find the write point. I'll address this is a separate patch. Meanwhile, attached is an updated version of the current one. In the new version of the patch I also eliminated the ngx_quic_max_stream_flow() function and embedded its content in ngx_quic_stream_flush(). -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1644054894 -10800 # Sat Feb 05 12:54:54 2022 +0300 # Branch quic # Node ID 6e1674c257709341a7508ae4bdab6f7f7d2e9284 # Parent 6c1dfd072859022f830aeea49db7cbe3c9f7fb55 QUIC: stream lingering. Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it can persist after connection is closed by application. During this period, server is expecting stream final size from client for correct flow control. Also, buffered output is sent to client as more flow control credit is granted. 
diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -303,6 +303,7 @@ ngx_quic_new_connection(ngx_connection_t ctp->active_connection_id_limit = 2; ngx_queue_init(&qc->streams.uninitialized); + ngx_queue_init(&qc->streams.free); qc->streams.recv_max_data = qc->tp.initial_max_data; qc->streams.recv_window = qc->streams.recv_max_data; diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h --- a/src/event/quic/ngx_event_quic.h +++ b/src/event/quic/ngx_event_quic.h @@ -78,12 +78,14 @@ struct ngx_quic_stream_s { uint64_t id; uint64_t acked; uint64_t send_max_data; + uint64_t send_offset; + uint64_t send_final_size; uint64_t recv_max_data; uint64_t recv_offset; uint64_t recv_window; uint64_t recv_last; uint64_t recv_size; - uint64_t final_size; + uint64_t recv_final_size; ngx_chain_t *in; ngx_chain_t *out; ngx_uint_t cancelable; /* unsigned cancelable:1; */ diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -114,13 +114,16 @@ struct ngx_quic_socket_s { typedef struct { ngx_rbtree_t tree; ngx_rbtree_node_t sentinel; + ngx_queue_t uninitialized; + ngx_queue_t free; uint64_t sent; uint64_t recv_offset; uint64_t recv_window; uint64_t recv_last; uint64_t recv_max_data; + uint64_t send_offset; uint64_t send_max_data; uint64_t server_max_streams_uni; diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c --- a/src/event/quic/ngx_event_quic_frames.c +++ b/src/event/quic/ngx_event_quic_frames.c @@ -391,6 +391,10 @@ ngx_quic_split_frame(ngx_connection_t *c return NGX_ERROR; } + if (f->type == NGX_QUIC_FT_STREAM) { + f->u.stream.fin = 0; + } + ngx_queue_insert_after(&f->queue, &nf->queue); return NGX_OK; diff --git a/src/event/quic/ngx_event_quic_streams.c 
b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -13,6 +13,8 @@ #define NGX_QUIC_STREAM_GONE (void *) -1 +static ngx_int_t ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, + ngx_uint_t err); static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); @@ -28,11 +30,12 @@ static ssize_t ngx_quic_stream_send(ngx_ size_t size); static ngx_chain_t *ngx_quic_stream_send_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit); -static size_t ngx_quic_max_stream_flow(ngx_connection_t *c); +static ngx_int_t ngx_quic_stream_flush(ngx_quic_stream_t *qs); static void ngx_quic_stream_cleanup_handler(void *data); -static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); -static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); -static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); +static ngx_int_t ngx_quic_close_stream(ngx_quic_stream_t *qs); +static ngx_int_t ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last); +static ngx_int_t ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last); +static ngx_int_t ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs); static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); static void ngx_quic_set_event(ngx_event_t *ev); @@ -186,15 +189,20 @@ ngx_quic_close_streams(ngx_connection_t ns = 0; #endif - for (node = ngx_rbtree_min(tree->root, tree->sentinel); - node; - node = ngx_rbtree_next(tree, node)) - { + node = ngx_rbtree_min(tree->root, tree->sentinel); + + while (node) { qs = (ngx_quic_stream_t *) node; + node = ngx_rbtree_next(tree, node); qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; + if (qs->connection == NULL) { + ngx_quic_close_stream(qs); + continue; + } + 
ngx_quic_set_event(qs->connection->read); ngx_quic_set_event(qs->connection->write); @@ -213,13 +221,17 @@ ngx_quic_close_streams(ngx_connection_t ngx_int_t ngx_quic_reset_stream(ngx_connection_t *c, ngx_uint_t err) { + return ngx_quic_do_reset_stream(c->quic, err); +} + + +static ngx_int_t +ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, ngx_uint_t err) +{ ngx_connection_t *pc; ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; - if (qs->send_state == NGX_QUIC_STREAM_SEND_DATA_RECVD || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_SENT || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_RECVD) @@ -228,10 +240,14 @@ ngx_quic_reset_stream(ngx_connection_t * } qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; + qs->send_final_size = qs->send_offset; pc = qs->parent; qc = ngx_quic_get_connection(pc); + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL reset", qs->id); + frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { return NGX_ERROR; @@ -241,10 +257,13 @@ ngx_quic_reset_stream(ngx_connection_t * frame->type = NGX_QUIC_FT_RESET_STREAM; frame->u.reset_stream.id = qs->id; frame->u.reset_stream.error_code = err; - frame->u.reset_stream.final_size = c->sent; + frame->u.reset_stream.final_size = qs->send_offset; ngx_quic_queue_frame(qc, frame); + ngx_quic_free_chain(pc, qs->out); + qs->out = NULL; + return NGX_OK; } @@ -271,10 +290,7 @@ ngx_quic_shutdown_stream(ngx_connection_ static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c) { - ngx_connection_t *pc; - ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; - ngx_quic_connection_t *qc; + ngx_quic_stream_t *qs; qs = c->quic; @@ -284,32 +300,13 @@ ngx_quic_shutdown_stream_send(ngx_connec return NGX_OK; } - qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; - - pc = qs->parent; - qc = ngx_quic_get_connection(pc); + qs->send_state = NGX_QUIC_STREAM_SEND_SEND; + qs->send_final_size = c->sent; - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - 
return NGX_ERROR; - } - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, "quic stream id:0x%xL send shutdown", qs->id); - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 1; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = 0; - - ngx_quic_queue_frame(qc, frame); - - return NGX_OK; + return ngx_quic_stream_flush(qs); } @@ -341,7 +338,7 @@ ngx_quic_shutdown_stream_recv(ngx_connec return NGX_ERROR; } - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, "quic stream id:0x%xL recv shutdown", qs->id); frame->level = ssl_encryption_application; @@ -591,6 +588,7 @@ ngx_quic_create_stream(ngx_connection_t { ngx_log_t *log; ngx_pool_t *pool; + ngx_queue_t *q; ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_pool_cleanup_t *cln; @@ -601,25 +599,41 @@ ngx_quic_create_stream(ngx_connection_t qc = ngx_quic_get_connection(c); - pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); - if (pool == NULL) { - return NULL; + if (!ngx_queue_empty(&qc->streams.free)) { + q = ngx_queue_head(&qc->streams.free); + qs = ngx_queue_data(q, ngx_quic_stream_t, queue); + ngx_queue_remove(&qs->queue); + + } else { + /* + * the number of streams is limited by transport + * parameters and application requirements + */ + + qs = ngx_palloc(c->pool, sizeof(ngx_quic_stream_t)); + if (qs == NULL) { + return NULL; + } } - qs = ngx_pcalloc(pool, sizeof(ngx_quic_stream_t)); - if (qs == NULL) { - ngx_destroy_pool(pool); - return NULL; - } + ngx_memzero(qs, sizeof(ngx_quic_stream_t)); qs->node.key = id; qs->parent = c; qs->id = id; - qs->final_size = (uint64_t) -1; + qs->send_final_size = (uint64_t) -1; + qs->recv_final_size = (uint64_t) -1; + + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); + if (pool == NULL) { + 
ngx_queue_insert_tail(&qc->streams.free, &qs->queue); + return NULL; + } log = ngx_palloc(pool, sizeof(ngx_log_t)); if (log == NULL) { ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -629,6 +643,7 @@ ngx_quic_create_stream(ngx_connection_t sc = ngx_get_connection(c->fd, log); if (sc == NULL) { ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -697,6 +712,7 @@ ngx_quic_create_stream(ngx_connection_t if (cln == NULL) { ngx_close_connection(sc); ngx_destroy_pool(pool); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); return NULL; } @@ -737,7 +753,7 @@ ngx_quic_stream_recv(ngx_connection_t *c return NGX_ERROR; } - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, "quic stream id:0x%xL recv buf:%uz", qs->id, size); if (size == 0) { @@ -763,7 +779,7 @@ ngx_quic_stream_recv(ngx_connection_t *c rev->ready = 0; if (qs->recv_state == NGX_QUIC_STREAM_RECV_DATA_RECVD - && qs->recv_offset == qs->final_size) + && qs->recv_offset == qs->recv_final_size) { qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_READ; } @@ -781,7 +797,7 @@ ngx_quic_stream_recv(ngx_connection_t *c ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic stream id:0x%xL recv len:%z", qs->id, len); - if (ngx_quic_update_flow(c, qs->recv_offset + len) != NGX_OK) { + if (ngx_quic_update_flow(qs, qs->recv_offset + len) != NGX_OK) { return NGX_ERROR; } @@ -822,9 +838,7 @@ ngx_quic_stream_send_chain(ngx_connectio off_t flow; size_t n; ngx_event_t *wev; - ngx_chain_t *out; ngx_connection_t *pc; - ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -842,7 +856,8 @@ ngx_quic_stream_send_chain(ngx_connectio qs->send_state = NGX_QUIC_STREAM_SEND_SEND; - flow = ngx_quic_max_stream_flow(c); + flow = qs->acked + qc->conf->stream_buffer_size - c->sent; + if (flow == 0) { wev->ready = 0; return in; @@ -852,37 +867,15 @@ 
ngx_quic_stream_send_chain(ngx_connectio limit = flow; } - in = ngx_quic_write_chain(pc, &qs->out, in, limit, 0, &n); + in = ngx_quic_write_chain(pc, &qs->out, in, limit, + c->sent - qs->send_offset, &n); if (in == NGX_CHAIN_ERROR) { return NGX_CHAIN_ERROR; } - out = ngx_quic_read_chain(pc, &qs->out, n); - if (out == NGX_CHAIN_ERROR) { - return NGX_CHAIN_ERROR; - } - - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - return NGX_CHAIN_ERROR; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->data = out; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 0; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = n; - c->sent += n; qc->streams.sent += n; - ngx_quic_queue_frame(qc, frame); - if (in) { wev->ready = 0; } @@ -890,61 +883,96 @@ ngx_quic_stream_send_chain(ngx_connectio ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic send_chain sent:%uz", n); + if (ngx_quic_stream_flush(qs) != NGX_OK) { + return NGX_CHAIN_ERROR; + } + return in; } -static size_t -ngx_quic_max_stream_flow(ngx_connection_t *c) +static ngx_int_t +ngx_quic_stream_flush(ngx_quic_stream_t *qs) { - size_t size; - uint64_t sent, unacked; - ngx_quic_stream_t *qs; + off_t limit; + size_t len; + ngx_uint_t last; + ngx_chain_t *out, *cl; + ngx_quic_frame_t *frame; + ngx_connection_t *pc; ngx_quic_connection_t *qc; - qs = c->quic; - qc = ngx_quic_get_connection(qs->parent); + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { + return NGX_OK; + } - size = qc->conf->stream_buffer_size; - sent = c->sent; - unacked = sent - qs->acked; + pc = qs->parent; + qc = ngx_quic_get_connection(pc); if (qc->streams.send_max_data == 0) { qc->streams.send_max_data = qc->ctp.initial_max_data; } - if (unacked >= size) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit buffer size"); - return 0; + limit = ngx_min(qc->streams.send_max_data - qc->streams.send_offset, + 
qs->send_max_data - qs->send_offset); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flush limit:%O", qs->id, limit); + + out = ngx_quic_read_chain(pc, &qs->out, limit); + if (out == NGX_CHAIN_ERROR) { + return NGX_ERROR; } - size -= unacked; + len = 0; + last = 0; + + for (cl = out; cl; cl = cl->next) { + len += cl->buf->last - cl->buf->pos; + } - if (qc->streams.sent >= qc->streams.send_max_data) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit MAX_DATA"); - return 0; + if (qs->send_final_size != (uint64_t) -1 + && qs->send_final_size == qs->send_offset + len) + { + qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; + last = 1; + } + + if (len == 0 && !last) { + return NGX_OK; } - if (qc->streams.sent + size > qc->streams.send_max_data) { - size = qc->streams.send_max_data - qc->streams.sent; + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; } - if (sent >= qs->send_max_data) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow hit MAX_STREAM_DATA"); - return 0; + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STREAM; + frame->data = out; + + frame->u.stream.off = 1; + frame->u.stream.len = 1; + frame->u.stream.fin = last; + + frame->u.stream.stream_id = qs->id; + frame->u.stream.offset = qs->send_offset; + frame->u.stream.length = len; + + ngx_quic_queue_frame(qc, frame); + + qs->send_offset += len; + qc->streams.send_offset += len; + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flush len:%uz last:%ui", + qs->id, len, last); + + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); } - if (sent + size > qs->send_max_data) { - size = qs->send_max_data - sent; - } - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic send flow:%uz", size); - - return size; + return NGX_OK; } @@ -953,40 +981,67 @@ ngx_quic_stream_cleanup_handler(void *da { ngx_connection_t *c = data; + ngx_quic_stream_t *qs; + + qs = 
c->quic; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, + "quic stream id:0x%xL cleanup", qs->id); + + if (ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN) != NGX_OK) { + ngx_quic_close_connection(c, NGX_ERROR); + return; + } + + qs->connection = NULL; + + if (ngx_quic_close_stream(qs) != NGX_OK) { + ngx_quic_close_connection(c, NGX_ERROR); + return; + } +} + + +static ngx_int_t +ngx_quic_close_stream(ngx_quic_stream_t *qs) +{ ngx_connection_t *pc; ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL cleanup", qs->id); + if (!qc->closing) { + /* make sure everything is sent and final size is received */ + + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV + || qs->send_state == NGX_QUIC_STREAM_SEND_READY + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) + { + return NGX_OK; + } + } + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL close", qs->id); + + ngx_quic_free_chain(pc, qs->in); + ngx_quic_free_chain(pc, qs->out); ngx_rbtree_delete(&qc->streams.tree, &qs->node); - ngx_quic_free_chain(pc, qs->in); - ngx_quic_free_chain(pc, qs->out); + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); if (qc->closing) { /* schedule handler call to continue ngx_quic_close_connection() */ ngx_post_event(pc->read, &ngx_posted_events); - return; + return NGX_OK; } - if (qc->error) { - goto done; - } - - (void) ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN); - - (void) ngx_quic_update_flow(c, qs->recv_last); - if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { - goto done; + return NGX_ERROR; } frame->level = ssl_encryption_application; @@ -1004,13 +1059,11 @@ ngx_quic_stream_cleanup_handler(void *da ngx_quic_queue_frame(qc, frame); } -done: - - (void) ngx_quic_output(pc); - if (qc->shutdown) { ngx_post_event(pc->read, 
&ngx_posted_events); } + + return NGX_OK; } @@ -1020,7 +1073,6 @@ ngx_quic_handle_stream_frame(ngx_connect { size_t size; uint64_t last; - ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; ngx_quic_stream_frame_t *f; @@ -1048,19 +1100,17 @@ ngx_quic_handle_stream_frame(ngx_connect return NGX_OK; } - sc = qs->connection; - if (qs->recv_state != NGX_QUIC_STREAM_RECV_RECV && qs->recv_state != NGX_QUIC_STREAM_RECV_SIZE_KNOWN) { return NGX_OK; } - if (ngx_quic_control_flow(sc, last) != NGX_OK) { + if (ngx_quic_control_flow(qs, last) != NGX_OK) { return NGX_ERROR; } - if (qs->final_size != (uint64_t) -1 && last > qs->final_size) { + if (qs->recv_final_size != (uint64_t) -1 && last > qs->recv_final_size) { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1075,7 +1125,8 @@ ngx_quic_handle_stream_frame(ngx_connect } if (f->fin) { - if (qs->final_size != (uint64_t) -1 && qs->final_size != last) { + if (qs->recv_final_size != (uint64_t) -1 && qs->recv_final_size != last) + { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1085,7 +1136,7 @@ ngx_quic_handle_stream_frame(ngx_connect return NGX_ERROR; } - qs->final_size = last; + qs->recv_final_size = last; qs->recv_state = NGX_QUIC_STREAM_RECV_SIZE_KNOWN; } @@ -1099,13 +1150,17 @@ ngx_quic_handle_stream_frame(ngx_connect qs->recv_size += size; if (qs->recv_state == NGX_QUIC_STREAM_RECV_SIZE_KNOWN - && qs->recv_size == qs->final_size) + && qs->recv_size == qs->recv_final_size) { qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_RECVD; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + if (f->offset == qs->recv_offset) { - ngx_quic_set_event(sc->read); + ngx_quic_set_event(qs->connection->read); } return NGX_OK; @@ -1128,20 +1183,26 @@ ngx_quic_handle_max_data_frame(ngx_conne return NGX_OK; } - if (tree->root != tree->sentinel - && qc->streams.sent >= qc->streams.send_max_data) + if (tree->root == tree->sentinel + || qc->streams.send_offset < 
qc->streams.send_max_data) { - - for (node = ngx_rbtree_min(tree->root, tree->sentinel); - node; - node = ngx_rbtree_next(tree, node)) - { - qs = (ngx_quic_stream_t *) node; - ngx_quic_set_event(qs->connection->write); - } + /* not blocked on MAX_DATA */ + qc->streams.send_max_data = f->max_data; + return NGX_OK; } qc->streams.send_max_data = f->max_data; + node = ngx_rbtree_min(tree->root, tree->sentinel); + + while (node && qc->streams.send_offset < qc->streams.send_max_data) { + + qs = (ngx_quic_stream_t *) node; + node = ngx_rbtree_next(tree, node); + + if (ngx_quic_stream_flush(qs) != NGX_OK) { + return NGX_ERROR; + } + } return NGX_OK; } @@ -1189,7 +1250,7 @@ ngx_quic_handle_stream_data_blocked_fram return NGX_OK; } - return ngx_quic_update_max_stream_data(qs->connection); + return ngx_quic_update_max_stream_data(qs); } @@ -1197,7 +1258,6 @@ ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_max_stream_data_frame_t *f) { - uint64_t sent; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1224,15 +1284,15 @@ ngx_quic_handle_max_stream_data_frame(ng return NGX_OK; } - sent = qs->connection->sent; - - if (sent >= qs->send_max_data) { - ngx_quic_set_event(qs->connection->write); + if (qs->send_offset < qs->send_max_data) { + /* not blocked on MAX_STREAM_DATA */ + qs->send_max_data = f->limit; + return NGX_OK; } qs->send_max_data = f->limit; - return NGX_OK; + return ngx_quic_stream_flush(qs); } @@ -1240,7 +1300,6 @@ ngx_int_t ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) { - ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1271,13 +1330,13 @@ ngx_quic_handle_reset_stream_frame(ngx_c qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; - sc = qs->connection; - - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { + if (ngx_quic_control_flow(qs, f->final_size) != NGX_OK) { return NGX_ERROR; } - if (qs->final_size 
!= (uint64_t) -1 && qs->final_size != f->final_size) { + if (qs->recv_final_size != (uint64_t) -1 + && qs->recv_final_size != f->final_size) + { qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; return NGX_ERROR; } @@ -1287,12 +1346,16 @@ ngx_quic_handle_reset_stream_frame(ngx_c return NGX_ERROR; } - qs->final_size = f->final_size; + qs->recv_final_size = f->final_size; - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { + if (ngx_quic_update_flow(qs, qs->recv_final_size) != NGX_OK) { return NGX_ERROR; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + ngx_quic_set_event(qs->connection->read); return NGX_OK; @@ -1325,10 +1388,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c return NGX_OK; } - if (ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { + if (ngx_quic_do_reset_stream(qs, f->error_code) != NGX_OK) { return NGX_ERROR; } + if (qs->connection == NULL) { + return ngx_quic_close_stream(qs); + } + ngx_quic_set_event(qs->connection->write); return NGX_OK; @@ -1378,30 +1445,37 @@ ngx_quic_handle_stream_ack(ngx_connectio return; } + if (qs->connection == NULL) { + qs->acked += f->u.stream.length; + return; + } + sent = qs->connection->sent; unacked = sent - qs->acked; + qs->acked += f->u.stream.length; - if (unacked >= qc->conf->stream_buffer_size) { - ngx_quic_set_event(qs->connection->write); + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL ack len:%uL acked:%uL unacked:%uL", + qs->id, f->u.stream.length, qs->acked, sent - qs->acked); + + if (unacked != qc->conf->stream_buffer_size) { + /* not blocked on buffer size */ + return; } - qs->acked += f->u.stream.length; - - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, - "quic stream ack len:%uL acked:%uL unacked:%uL", - f->u.stream.length, qs->acked, sent - qs->acked); + ngx_quic_set_event(qs->connection->write); } static ngx_int_t -ngx_quic_control_flow(ngx_connection_t *c, uint64_t last) +ngx_quic_control_flow(ngx_quic_stream_t *qs, 
uint64_t last) { uint64_t len; - ngx_quic_stream_t *qs; + ngx_connection_t *pc; ngx_quic_connection_t *qc; - qs = c->quic; - qc = ngx_quic_get_connection(qs->parent); + pc = qs->parent; + qc = ngx_quic_get_connection(pc); if (last <= qs->recv_last) { return NGX_OK; @@ -1409,9 +1483,9 @@ ngx_quic_control_flow(ngx_connection_t * len = last - qs->recv_last; - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow control msd:%uL/%uL md:%uL/%uL", - last, qs->recv_max_data, qc->streams.recv_last + len, + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow control msd:%uL/%uL md:%uL/%uL", + qs->id, last, qs->recv_max_data, qc->streams.recv_last + len, qc->streams.recv_max_data); qs->recv_last += len; @@ -1435,14 +1509,12 @@ ngx_quic_control_flow(ngx_connection_t * static ngx_int_t -ngx_quic_update_flow(ngx_connection_t *c, uint64_t last) +ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last) { uint64_t len; ngx_connection_t *pc; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); @@ -1452,13 +1524,13 @@ ngx_quic_update_flow(ngx_connection_t *c len = last - qs->recv_offset; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update %uL", last); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow update %uL", qs->id, last); qs->recv_offset += len; if (qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { - if (ngx_quic_update_max_stream_data(c) != NGX_OK) { + if (ngx_quic_update_max_stream_data(qs) != NGX_OK) { return NGX_ERROR; } } @@ -1478,15 +1550,13 @@ ngx_quic_update_flow(ngx_connection_t *c static ngx_int_t -ngx_quic_update_max_stream_data(ngx_connection_t *c) +ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs) { uint64_t recv_max_data; ngx_connection_t *pc; ngx_quic_frame_t *frame; - ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); @@ -1502,8 +1572,9 
@@ ngx_quic_update_max_stream_data(ngx_conn qs->recv_max_data = recv_max_data; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update msd:%uL", qs->recv_max_data); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, + "quic stream id:0x%xL flow update msd:%uL", + qs->id, qs->recv_max_data); frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c --- a/src/http/v3/ngx_http_v3_uni.c +++ b/src/http/v3/ngx_http_v3_uni.c @@ -295,8 +295,6 @@ ngx_http_v3_uni_dummy_write_handler(ngx_ } -/* XXX async & buffered stream writes */ - ngx_connection_t * ngx_http_v3_create_push_stream(ngx_connection_t *c, uint64_t push_id) { From i at morfi.ru Tue Feb 8 11:10:04 2022 From: i at morfi.ru (Andrey Kolyshkin) Date: Tue, 8 Feb 2022 14:10:04 +0300 Subject: [QUIC] padding of Initial packets In-Reply-To: References: Message-ID: Hello. This patch is strange. 1. ngx_quic_revert_send can set to ctx an uninitialized value from preserved_pnum. (example if min > len and i = 0, only 0 element is filled in preserved_pnum but restored all) 2. ngx_quic_revert_send will restored pnum for ctx that have already called ngx_quic_output_packet and the packet with this pnum will be queued. (example if min > len and i = 1) On Wed, Feb 2, 2022 at 2:07 PM Sergey Kandaurov wrote: > > > On 2 Feb 2022, at 13:55, Vladimir Homutov wrote: > > > > # HG changeset patch > > # User Vladimir Homutov > > # Date 1643796973 -10800 > > # Wed Feb 02 13:16:13 2022 +0300 > > # Branch quic > > # Node ID fbfbcf66990e8964bcf308f3869f37d1a1acceeb > > # Parent 8c6645ecaeb6cbf27976fd9035440bfcab943117 > > QUIC: fixed padding of initial packets in case of limited path. 
> > > > Previously, non-padded initial packet could be sent as a result of the > > following situation: > > > > - initial queue is not empty (so padding to 1200 is required) > > - handhsake queue is not empty (so padding is to be added after h/s > packet) > > handshake > > > - path is limited > > > > If serializing handshake packet would violate path limit, such packet was > > omitted, and the non-padded initial packet was sent. > > > > The fix is to avoid sending the packet at all in such case. This > follows the > > original intention introduced in c5155a0cb12f. > > > > diff --git a/src/event/quic/ngx_event_quic_output.c > b/src/event/quic/ngx_event_quic_output.c > > --- a/src/event/quic/ngx_event_quic_output.c > > +++ b/src/event/quic/ngx_event_quic_output.c > > @@ -158,7 +158,14 @@ ngx_quic_create_datagrams(ngx_connection > > ? NGX_QUIC_MIN_INITIAL_SIZE - (p - dst) : 0; > > > > if (min > len) { > > - continue; > > + /* padding can't be applied - avoid sending the packet > */ > > + > > + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { > > + ctx = &qc->send_ctx[i]; > > + ngx_quic_revert_send(c, ctx, preserved_pnum[i]); > > this could be simplified to reduce ctx variable: > ngx_quic_revert_send(c, &qc->send_ctx[i], preserved_pnum[i]); > > but it won't fit into 80 line, so that's good just as well > > > + } > > + > > + return NGX_OK; > > } > > > > n = ngx_quic_output_packet(c, ctx, p, len, min); > > > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org > -- Best regards, Andrey -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From arut at nginx.com Tue Feb 8 11:16:35 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 8 Feb 2022 14:16:35 +0300 Subject: [PATCH 00 of 15] Serve all requests from single tempfile In-Reply-To: <8c1c3901-eb36-454a-db28-d0b058d7ac7e@cdn77.com> References: <20220207113143.jzskvfnm7uvamn2u@Romans-MacBook-Pro.local> <8c1c3901-eb36-454a-db28-d0b058d7ac7e@cdn77.com> Message-ID: <20220208111635.xuqsrrwcrcrtntma@Romans-MacBook-Pro.local> Hi, On Mon, Feb 07, 2022 at 01:27:15PM +0100, Jiří Setnička via nginx-devel wrote: > Hello, > > > Thanks for sharing your work. Indeed, nginx currently lacks a good solution > > for serving a file that's being downloaded from upstream. We tried to address > > this issue a few years ago. Our solution was similar to yours, but instead > > of sharing the temp file between workers, we moved the temp file to its > > destination right after writing the header. A new bit was added to the header > > signalling that this file is being updated. > > > > The biggest issue with this kind of solutions is how we wait for updates in > > a file. We believe that polling a file with a given time interval is not a > > perfect approach, even though nginx does that for cache locks. > > polling is done only on the ngx_http_file_cache_tf_node_t struct in the > shared memory (see patch 09 of 15, where c->length is updated from > c->tf_node->length and then this length is compared with > c->body_sent_bytes), not on the file itself. Length in the tf_node is > updated with each write from the primary request (see patch 05 of 15). > > It is better than polling individual files, but I agree it is still polling, > which isn't great. > > > [...] > > Another approach would be to create an > > inter-worker messaging system for signalling file updates. > > We were thinking about creating something like that but we buried this idea > because it seems quite complex to do it right and reliable. 
And polling the > tf_node in the shared memory (with very low proxy_cache_tempfile_loop) works > sufficiently good. > > > > It's good to know the solution works for you. Please keep us posted about > > future improvements especially the ones which would avoid polling and decrease > > complexity. > > We would be happy to get this patch to the mainline nginx in the future, so > that all nginx users could benefit from it. > > We will be thinking about avoiding polling and implementing some > inter-worker messaging, but it may be some time, because it seems quite > complex. Could you share some hints about how do you thing it would be best > to implement it in the worker's event loop? Even though I have some ideas, they need to be checked first. I can only say this should be a core nginx feature. > Jiří Setnička > CDN77 > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org -- Roman Arutyunyan From vl at nginx.com Tue Feb 8 11:45:19 2022 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 8 Feb 2022 14:45:19 +0300 Subject: [PATCH] QUIC: stream lingering In-Reply-To: <20220207141617.av7ft5sycecoce6r@Romans-MacBook-Pro.local> References: <20220207141617.av7ft5sycecoce6r@Romans-MacBook-Pro.local> Message-ID: On Mon, Feb 07, 2022 at 05:16:17PM +0300, Roman Arutyunyan wrote: > Hi, > > On Fri, Feb 04, 2022 at 04:56:23PM +0300, Vladimir Homutov wrote: > > On Tue, Feb 01, 2022 at 04:39:59PM +0300, Roman Arutyunyan wrote: > > > # HG changeset patch > > > # User Roman Arutyunyan > > > # Date 1643722727 -10800 > > > # Tue Feb 01 16:38:47 2022 +0300 > > > # Branch quic > > > # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 > > > # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 > > > QUIC: stream lingering. > > > > > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > > > can persist after connection is closed by application. 
During this period, > > > server is expecting stream final size from client for correct flow control. > > > Also, buffered output is sent to client as more flow control credit is granted. > > > > > [..] > > > > > +static ngx_int_t > > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > > +{ > > > + size_t limit, len; > > > + ngx_uint_t last; > > > + ngx_chain_t *out, *cl; > > > + ngx_quic_frame_t *frame; > > > + ngx_connection_t *pc; > > > + ngx_quic_connection_t *qc; > > > + > > > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > > > + return NGX_OK; > > > + } > > > + > > > + pc = qs->parent; > > > + qc = ngx_quic_get_connection(pc); > > > + > > > + limit = ngx_quic_max_stream_flow(qs); > > > + last = 0; > > > + > > > + out = ngx_quic_read_chain(pc, &qs->out, limit); > > > + if (out == NGX_CHAIN_ERROR) { > > > + return NGX_ERROR; > > > + } > > > + > > > + len = 0; > > > + last = 0; > > > > this assignment looks duplicate. > > Thanks, fixed. > > > [..] > > > > > +static ngx_int_t > > > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > > > +{ > > > ngx_connection_t *pc; > > > ngx_quic_frame_t *frame; > > > - ngx_quic_stream_t *qs; > > > ngx_quic_connection_t *qc; > > > > > > - qs = c->quic; > > > pc = qs->parent; > > > qc = ngx_quic_get_connection(pc); > > > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > - "quic stream id:0x%xL cleanup", qs->id); > > > + if (!qc->closing) { > > > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > > > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > > > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > > > + { > > > > so basically this are the states where we need to wait for FIN? > > and thus avoid closing till we get it. > > I would add a comment here. > > On the receiving end we wait either for fin or for reset to have final size. > On the sending end we wait for everything that's buffered to be sent. > Added a comment about that. > > > [..] 
> > > + if (qs->connection == NULL) { > > > + return ngx_quic_close_stream(qs); > > > + } > > > + > > > ngx_quic_set_event(qs->connection->write); > > > > this pattern - check connection, close if NULL and set event seem to > > repeat. Maybe it's worth to try to put this check/action into > > ngx_quic_set_event somehow ? we could instead have > > set_read_event/set_write_event maybe. > > I thought about this too, but it's not always that simple. And even if it was, > the new function/macro would have unclear semantics. Let's just remember this > as a possible future optimiation. > > > > +static ngx_int_t > > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > > + > > [..] > > > + if (len == 0 && !last) { > > > + return NGX_OK; > > > + } > > > + > > > + frame = ngx_quic_alloc_frame(pc); > > > + if (frame == NULL) { > > > + return NGX_ERROR; > > > + } > > > + > > > + frame = ngx_quic_alloc_frame(pc); > > > + if (frame == NULL) { > > > + return NGX_ERROR; > > > + } > > > > one more dup here. > > Yes, thanks. > > > Overal, it looks good, but the testing revealed another issue: with big > > buffer sizes we run into issue of too long chains in ngx_quic_write_chain(). > > As discussed, this certainly needs optimization - probably adding some > > pointer to the end to facilitate appending, or something else. > > It's true ngx_quic_write_chain() needs to be optimized. When the buffered > chain is big, it takes too much time to find the write point. I'll address > this is a separate patch. Meanwhile, attached is an updated version of the > current one. > > In the new version of the patch I also eliminated the > ngx_quic_max_stream_flow() function and embedded its content in > ngx_quic_stream_flush(). yes, this looks correct - flow limit should not consider buffer as it was before. I think we should check for limit == 0 before doing read_chain and this is good place for debug logging about 'hit MAX_DATA/MAX_STREAM_DATA' that was removed by update. 
> > -- > Roman Arutyunyan > # HG changeset patch > # User Roman Arutyunyan > # Date 1644054894 -10800 > # Sat Feb 05 12:54:54 2022 +0300 > # Branch quic > # Node ID 6e1674c257709341a7508ae4bdab6f7f7d2e9284 > # Parent 6c1dfd072859022f830aeea49db7cbe3c9f7fb55 > QUIC: stream lingering. > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > can persist after connection is closed by application. During this period, > server is expecting stream final size from client for correct flow control. > Also, buffered output is sent to client as more flow control credit is granted. > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -303,6 +303,7 @@ ngx_quic_new_connection(ngx_connection_t > ctp->active_connection_id_limit = 2; > > ngx_queue_init(&qc->streams.uninitialized); > + ngx_queue_init(&qc->streams.free); > > qc->streams.recv_max_data = qc->tp.initial_max_data; > qc->streams.recv_window = qc->streams.recv_max_data; > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > --- a/src/event/quic/ngx_event_quic.h > +++ b/src/event/quic/ngx_event_quic.h > @@ -78,12 +78,14 @@ struct ngx_quic_stream_s { > uint64_t id; > uint64_t acked; > uint64_t send_max_data; > + uint64_t send_offset; > + uint64_t send_final_size; > uint64_t recv_max_data; > uint64_t recv_offset; > uint64_t recv_window; > uint64_t recv_last; > uint64_t recv_size; > - uint64_t final_size; > + uint64_t recv_final_size; > ngx_chain_t *in; > ngx_chain_t *out; > ngx_uint_t cancelable; /* unsigned cancelable:1; */ > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > --- a/src/event/quic/ngx_event_quic_connection.h > +++ b/src/event/quic/ngx_event_quic_connection.h > @@ -114,13 +114,16 @@ struct ngx_quic_socket_s { > typedef struct { > ngx_rbtree_t tree; > ngx_rbtree_node_t sentinel; > + > ngx_queue_t 
uninitialized; > + ngx_queue_t free; > > uint64_t sent; > uint64_t recv_offset; > uint64_t recv_window; > uint64_t recv_last; > uint64_t recv_max_data; > + uint64_t send_offset; > uint64_t send_max_data; > > uint64_t server_max_streams_uni; > diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c > --- a/src/event/quic/ngx_event_quic_frames.c > +++ b/src/event/quic/ngx_event_quic_frames.c > @@ -391,6 +391,10 @@ ngx_quic_split_frame(ngx_connection_t *c > return NGX_ERROR; > } > > + if (f->type == NGX_QUIC_FT_STREAM) { > + f->u.stream.fin = 0; > + } > + > ngx_queue_insert_after(&f->queue, &nf->queue); > > return NGX_OK; > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -13,6 +13,8 @@ > #define NGX_QUIC_STREAM_GONE (void *) -1 > > > +static ngx_int_t ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, > + ngx_uint_t err); > static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); > static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); > static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); > @@ -28,11 +30,12 @@ static ssize_t ngx_quic_stream_send(ngx_ > size_t size); > static ngx_chain_t *ngx_quic_stream_send_chain(ngx_connection_t *c, > ngx_chain_t *in, off_t limit); > -static size_t ngx_quic_max_stream_flow(ngx_connection_t *c); > +static ngx_int_t ngx_quic_stream_flush(ngx_quic_stream_t *qs); > static void ngx_quic_stream_cleanup_handler(void *data); > -static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > -static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > -static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > +static ngx_int_t ngx_quic_close_stream(ngx_quic_stream_t *qs); > +static ngx_int_t ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last); > +static 
ngx_int_t ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last); > +static ngx_int_t ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs); > static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); > static void ngx_quic_set_event(ngx_event_t *ev); > > @@ -186,15 +189,20 @@ ngx_quic_close_streams(ngx_connection_t > ns = 0; > #endif > > - for (node = ngx_rbtree_min(tree->root, tree->sentinel); > - node; > - node = ngx_rbtree_next(tree, node)) > - { > + node = ngx_rbtree_min(tree->root, tree->sentinel); > + > + while (node) { > qs = (ngx_quic_stream_t *) node; > + node = ngx_rbtree_next(tree, node); > > qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; > qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; > > + if (qs->connection == NULL) { > + ngx_quic_close_stream(qs); > + continue; > + } > + > ngx_quic_set_event(qs->connection->read); > ngx_quic_set_event(qs->connection->write); > > @@ -213,13 +221,17 @@ ngx_quic_close_streams(ngx_connection_t > ngx_int_t > ngx_quic_reset_stream(ngx_connection_t *c, ngx_uint_t err) > { > + return ngx_quic_do_reset_stream(c->quic, err); > +} > + > + > +static ngx_int_t > +ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, ngx_uint_t err) > +{ > ngx_connection_t *pc; > ngx_quic_frame_t *frame; > - ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > - > if (qs->send_state == NGX_QUIC_STREAM_SEND_DATA_RECVD > || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_SENT > || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_RECVD) > @@ -228,10 +240,14 @@ ngx_quic_reset_stream(ngx_connection_t * > } > > qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; > + qs->send_final_size = qs->send_offset; > > pc = qs->parent; > qc = ngx_quic_get_connection(pc); > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL reset", qs->id); > + > frame = ngx_quic_alloc_frame(pc); > if (frame == NULL) { > return NGX_ERROR; > @@ -241,10 +257,13 @@ ngx_quic_reset_stream(ngx_connection_t * > frame->type = 
NGX_QUIC_FT_RESET_STREAM; > frame->u.reset_stream.id = qs->id; > frame->u.reset_stream.error_code = err; > - frame->u.reset_stream.final_size = c->sent; > + frame->u.reset_stream.final_size = qs->send_offset; > > ngx_quic_queue_frame(qc, frame); > > + ngx_quic_free_chain(pc, qs->out); > + qs->out = NULL; > + > return NGX_OK; > } > > @@ -271,10 +290,7 @@ ngx_quic_shutdown_stream(ngx_connection_ > static ngx_int_t > ngx_quic_shutdown_stream_send(ngx_connection_t *c) > { > - ngx_connection_t *pc; > - ngx_quic_frame_t *frame; > - ngx_quic_stream_t *qs; > - ngx_quic_connection_t *qc; > + ngx_quic_stream_t *qs; > > qs = c->quic; > > @@ -284,32 +300,13 @@ ngx_quic_shutdown_stream_send(ngx_connec > return NGX_OK; > } > > - qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; > - > - pc = qs->parent; > - qc = ngx_quic_get_connection(pc); > + qs->send_state = NGX_QUIC_STREAM_SEND_SEND; > + qs->send_final_size = c->sent; > > - frame = ngx_quic_alloc_frame(pc); > - if (frame == NULL) { > - return NGX_ERROR; > - } > - > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, > "quic stream id:0x%xL send shutdown", qs->id); > > - frame->level = ssl_encryption_application; > - frame->type = NGX_QUIC_FT_STREAM; > - frame->u.stream.off = 1; > - frame->u.stream.len = 1; > - frame->u.stream.fin = 1; > - > - frame->u.stream.stream_id = qs->id; > - frame->u.stream.offset = c->sent; > - frame->u.stream.length = 0; > - > - ngx_quic_queue_frame(qc, frame); > - > - return NGX_OK; > + return ngx_quic_stream_flush(qs); > } > > > @@ -341,7 +338,7 @@ ngx_quic_shutdown_stream_recv(ngx_connec > return NGX_ERROR; > } > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > "quic stream id:0x%xL recv shutdown", qs->id); > > frame->level = ssl_encryption_application; > @@ -591,6 +588,7 @@ ngx_quic_create_stream(ngx_connection_t > { > ngx_log_t *log; > ngx_pool_t *pool; > + ngx_queue_t *q; > 
ngx_connection_t *sc; > ngx_quic_stream_t *qs; > ngx_pool_cleanup_t *cln; > @@ -601,25 +599,41 @@ ngx_quic_create_stream(ngx_connection_t > > qc = ngx_quic_get_connection(c); > > - pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); > - if (pool == NULL) { > - return NULL; > + if (!ngx_queue_empty(&qc->streams.free)) { > + q = ngx_queue_head(&qc->streams.free); > + qs = ngx_queue_data(q, ngx_quic_stream_t, queue); > + ngx_queue_remove(&qs->queue); > + > + } else { > + /* > + * the number of streams is limited by transport > + * parameters and application requirements > + */ > + > + qs = ngx_palloc(c->pool, sizeof(ngx_quic_stream_t)); > + if (qs == NULL) { > + return NULL; > + } > } > > - qs = ngx_pcalloc(pool, sizeof(ngx_quic_stream_t)); > - if (qs == NULL) { > - ngx_destroy_pool(pool); > - return NULL; > - } > + ngx_memzero(qs, sizeof(ngx_quic_stream_t)); > > qs->node.key = id; > qs->parent = c; > qs->id = id; > - qs->final_size = (uint64_t) -1; > + qs->send_final_size = (uint64_t) -1; > + qs->recv_final_size = (uint64_t) -1; > + > + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); > + if (pool == NULL) { > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > + return NULL; > + } > > log = ngx_palloc(pool, sizeof(ngx_log_t)); > if (log == NULL) { > ngx_destroy_pool(pool); > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > return NULL; > } > > @@ -629,6 +643,7 @@ ngx_quic_create_stream(ngx_connection_t > sc = ngx_get_connection(c->fd, log); > if (sc == NULL) { > ngx_destroy_pool(pool); > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > return NULL; > } > > @@ -697,6 +712,7 @@ ngx_quic_create_stream(ngx_connection_t > if (cln == NULL) { > ngx_close_connection(sc); > ngx_destroy_pool(pool); > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > return NULL; > } > > @@ -737,7 +753,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > return NGX_ERROR; > } > > - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > + 
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > "quic stream id:0x%xL recv buf:%uz", qs->id, size); > > if (size == 0) { > @@ -763,7 +779,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > rev->ready = 0; > > if (qs->recv_state == NGX_QUIC_STREAM_RECV_DATA_RECVD > - && qs->recv_offset == qs->final_size) > + && qs->recv_offset == qs->recv_final_size) > { > qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_READ; > } > @@ -781,7 +797,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > "quic stream id:0x%xL recv len:%z", qs->id, len); > > - if (ngx_quic_update_flow(c, qs->recv_offset + len) != NGX_OK) { > + if (ngx_quic_update_flow(qs, qs->recv_offset + len) != NGX_OK) { > return NGX_ERROR; > } > > @@ -822,9 +838,7 @@ ngx_quic_stream_send_chain(ngx_connectio > off_t flow; > size_t n; > ngx_event_t *wev; > - ngx_chain_t *out; > ngx_connection_t *pc; > - ngx_quic_frame_t *frame; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > @@ -842,7 +856,8 @@ ngx_quic_stream_send_chain(ngx_connectio > > qs->send_state = NGX_QUIC_STREAM_SEND_SEND; > > - flow = ngx_quic_max_stream_flow(c); > + flow = qs->acked + qc->conf->stream_buffer_size - c->sent; > + > if (flow == 0) { > wev->ready = 0; > return in; > @@ -852,37 +867,15 @@ ngx_quic_stream_send_chain(ngx_connectio > limit = flow; > } > > - in = ngx_quic_write_chain(pc, &qs->out, in, limit, 0, &n); > + in = ngx_quic_write_chain(pc, &qs->out, in, limit, > + c->sent - qs->send_offset, &n); > if (in == NGX_CHAIN_ERROR) { > return NGX_CHAIN_ERROR; > } > > - out = ngx_quic_read_chain(pc, &qs->out, n); > - if (out == NGX_CHAIN_ERROR) { > - return NGX_CHAIN_ERROR; > - } > - > - frame = ngx_quic_alloc_frame(pc); > - if (frame == NULL) { > - return NGX_CHAIN_ERROR; > - } > - > - frame->level = ssl_encryption_application; > - frame->type = NGX_QUIC_FT_STREAM; > - frame->data = out; > - frame->u.stream.off = 1; > - frame->u.stream.len = 1; > - frame->u.stream.fin = 0; > - > - 
frame->u.stream.stream_id = qs->id; > - frame->u.stream.offset = c->sent; > - frame->u.stream.length = n; > - > c->sent += n; > qc->streams.sent += n; > > - ngx_quic_queue_frame(qc, frame); > - > if (in) { > wev->ready = 0; > } > @@ -890,61 +883,96 @@ ngx_quic_stream_send_chain(ngx_connectio > ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > "quic send_chain sent:%uz", n); > > + if (ngx_quic_stream_flush(qs) != NGX_OK) { > + return NGX_CHAIN_ERROR; > + } > + > return in; > } > > > -static size_t > -ngx_quic_max_stream_flow(ngx_connection_t *c) > +static ngx_int_t > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > { > - size_t size; > - uint64_t sent, unacked; > - ngx_quic_stream_t *qs; > + off_t limit; > + size_t len; > + ngx_uint_t last; > + ngx_chain_t *out, *cl; > + ngx_quic_frame_t *frame; > + ngx_connection_t *pc; > ngx_quic_connection_t *qc; > > - qs = c->quic; > - qc = ngx_quic_get_connection(qs->parent); > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > + return NGX_OK; > + } > > - size = qc->conf->stream_buffer_size; > - sent = c->sent; > - unacked = sent - qs->acked; > + pc = qs->parent; > + qc = ngx_quic_get_connection(pc); > > if (qc->streams.send_max_data == 0) { > qc->streams.send_max_data = qc->ctp.initial_max_data; > } > > - if (unacked >= size) { > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic send flow hit buffer size"); > - return 0; > + limit = ngx_min(qc->streams.send_max_data - qc->streams.send_offset, > + qs->send_max_data - qs->send_offset); > + > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL flush limit:%O", qs->id, limit); > + > + out = ngx_quic_read_chain(pc, &qs->out, limit); > + if (out == NGX_CHAIN_ERROR) { > + return NGX_ERROR; > } > > - size -= unacked; > + len = 0; > + last = 0; > + > + for (cl = out; cl; cl = cl->next) { > + len += cl->buf->last - cl->buf->pos; > + } > > - if (qc->streams.sent >= qc->streams.send_max_data) { > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > - 
"quic send flow hit MAX_DATA"); > - return 0; > + if (qs->send_final_size != (uint64_t) -1 > + && qs->send_final_size == qs->send_offset + len) > + { > + qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; > + last = 1; > + } > + > + if (len == 0 && !last) { > + return NGX_OK; > } > > - if (qc->streams.sent + size > qc->streams.send_max_data) { > - size = qc->streams.send_max_data - qc->streams.sent; > + frame = ngx_quic_alloc_frame(pc); > + if (frame == NULL) { > + return NGX_ERROR; > } > > - if (sent >= qs->send_max_data) { > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic send flow hit MAX_STREAM_DATA"); > - return 0; > + frame->level = ssl_encryption_application; > + frame->type = NGX_QUIC_FT_STREAM; > + frame->data = out; > + > + frame->u.stream.off = 1; > + frame->u.stream.len = 1; > + frame->u.stream.fin = last; > + > + frame->u.stream.stream_id = qs->id; > + frame->u.stream.offset = qs->send_offset; > + frame->u.stream.length = len; > + > + ngx_quic_queue_frame(qc, frame); > + > + qs->send_offset += len; > + qc->streams.send_offset += len; > + > + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL flush len:%uz last:%ui", > + qs->id, len, last); > + > + if (qs->connection == NULL) { > + return ngx_quic_close_stream(qs); > } > > - if (sent + size > qs->send_max_data) { > - size = qs->send_max_data - sent; > - } > - > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic send flow:%uz", size); > - > - return size; > + return NGX_OK; > } > > > @@ -953,40 +981,67 @@ ngx_quic_stream_cleanup_handler(void *da > { > ngx_connection_t *c = data; > > + ngx_quic_stream_t *qs; > + > + qs = c->quic; > + > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, > + "quic stream id:0x%xL cleanup", qs->id); > + > + if (ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN) != NGX_OK) { > + ngx_quic_close_connection(c, NGX_ERROR); > + return; > + } > + > + qs->connection = NULL; > + > + if (ngx_quic_close_stream(qs) != NGX_OK) { > + 
ngx_quic_close_connection(c, NGX_ERROR); > + return; > + } > +} > + > + > +static ngx_int_t > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > +{ > ngx_connection_t *pc; > ngx_quic_frame_t *frame; > - ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > pc = qs->parent; > qc = ngx_quic_get_connection(pc); > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic stream id:0x%xL cleanup", qs->id); > + if (!qc->closing) { > + /* make sure everything is sent and final size is received */ > + > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > + { > + return NGX_OK; > + } > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL close", qs->id); > + > + ngx_quic_free_chain(pc, qs->in); > + ngx_quic_free_chain(pc, qs->out); > > ngx_rbtree_delete(&qc->streams.tree, &qs->node); > - ngx_quic_free_chain(pc, qs->in); > - ngx_quic_free_chain(pc, qs->out); > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > if (qc->closing) { > /* schedule handler call to continue ngx_quic_close_connection() */ > ngx_post_event(pc->read, &ngx_posted_events); > - return; > + return NGX_OK; > } > > - if (qc->error) { > - goto done; > - } > - > - (void) ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN); > - > - (void) ngx_quic_update_flow(c, qs->recv_last); > - > if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { > frame = ngx_quic_alloc_frame(pc); > if (frame == NULL) { > - goto done; > + return NGX_ERROR; > } > > frame->level = ssl_encryption_application; > @@ -1004,13 +1059,11 @@ ngx_quic_stream_cleanup_handler(void *da > ngx_quic_queue_frame(qc, frame); > } > > -done: > - > - (void) ngx_quic_output(pc); > - > if (qc->shutdown) { > ngx_post_event(pc->read, &ngx_posted_events); > } > + > + return NGX_OK; > } > > > @@ -1020,7 +1073,6 @@ ngx_quic_handle_stream_frame(ngx_connect > { > size_t size; > uint64_t last; > - 
ngx_connection_t *sc; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > ngx_quic_stream_frame_t *f; > @@ -1048,19 +1100,17 @@ ngx_quic_handle_stream_frame(ngx_connect > return NGX_OK; > } > > - sc = qs->connection; > - > if (qs->recv_state != NGX_QUIC_STREAM_RECV_RECV > && qs->recv_state != NGX_QUIC_STREAM_RECV_SIZE_KNOWN) > { > return NGX_OK; > } > > - if (ngx_quic_control_flow(sc, last) != NGX_OK) { > + if (ngx_quic_control_flow(qs, last) != NGX_OK) { > return NGX_ERROR; > } > > - if (qs->final_size != (uint64_t) -1 && last > qs->final_size) { > + if (qs->recv_final_size != (uint64_t) -1 && last > qs->recv_final_size) { > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > return NGX_ERROR; > } > @@ -1075,7 +1125,8 @@ ngx_quic_handle_stream_frame(ngx_connect > } > > if (f->fin) { > - if (qs->final_size != (uint64_t) -1 && qs->final_size != last) { > + if (qs->recv_final_size != (uint64_t) -1 && qs->recv_final_size != last) > + { > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > return NGX_ERROR; > } > @@ -1085,7 +1136,7 @@ ngx_quic_handle_stream_frame(ngx_connect > return NGX_ERROR; > } > > - qs->final_size = last; > + qs->recv_final_size = last; > qs->recv_state = NGX_QUIC_STREAM_RECV_SIZE_KNOWN; > } > > @@ -1099,13 +1150,17 @@ ngx_quic_handle_stream_frame(ngx_connect > qs->recv_size += size; > > if (qs->recv_state == NGX_QUIC_STREAM_RECV_SIZE_KNOWN > - && qs->recv_size == qs->final_size) > + && qs->recv_size == qs->recv_final_size) > { > qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_RECVD; > } > > + if (qs->connection == NULL) { > + return ngx_quic_close_stream(qs); > + } > + > if (f->offset == qs->recv_offset) { > - ngx_quic_set_event(sc->read); > + ngx_quic_set_event(qs->connection->read); > } > > return NGX_OK; > @@ -1128,20 +1183,26 @@ ngx_quic_handle_max_data_frame(ngx_conne > return NGX_OK; > } > > - if (tree->root != tree->sentinel > - && qc->streams.sent >= qc->streams.send_max_data) > + if (tree->root == tree->sentinel > + || qc->streams.send_offset < 
qc->streams.send_max_data) > { > - > - for (node = ngx_rbtree_min(tree->root, tree->sentinel); > - node; > - node = ngx_rbtree_next(tree, node)) > - { > - qs = (ngx_quic_stream_t *) node; > - ngx_quic_set_event(qs->connection->write); > - } > + /* not blocked on MAX_DATA */ > + qc->streams.send_max_data = f->max_data; > + return NGX_OK; > } > > qc->streams.send_max_data = f->max_data; > + node = ngx_rbtree_min(tree->root, tree->sentinel); > + > + while (node && qc->streams.send_offset < qc->streams.send_max_data) { > + > + qs = (ngx_quic_stream_t *) node; > + node = ngx_rbtree_next(tree, node); > + > + if (ngx_quic_stream_flush(qs) != NGX_OK) { > + return NGX_ERROR; > + } > + } > > return NGX_OK; > } > @@ -1189,7 +1250,7 @@ ngx_quic_handle_stream_data_blocked_fram > return NGX_OK; > } > > - return ngx_quic_update_max_stream_data(qs->connection); > + return ngx_quic_update_max_stream_data(qs); > } > > > @@ -1197,7 +1258,6 @@ ngx_int_t > ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, ngx_quic_max_stream_data_frame_t *f) > { > - uint64_t sent; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > @@ -1224,15 +1284,15 @@ ngx_quic_handle_max_stream_data_frame(ng > return NGX_OK; > } > > - sent = qs->connection->sent; > - > - if (sent >= qs->send_max_data) { > - ngx_quic_set_event(qs->connection->write); > + if (qs->send_offset < qs->send_max_data) { > + /* not blocked on MAX_STREAM_DATA */ > + qs->send_max_data = f->limit; > + return NGX_OK; > } > > qs->send_max_data = f->limit; > > - return NGX_OK; > + return ngx_quic_stream_flush(qs); > } > > > @@ -1240,7 +1300,6 @@ ngx_int_t > ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) > { > - ngx_connection_t *sc; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > @@ -1271,13 +1330,13 @@ ngx_quic_handle_reset_stream_frame(ngx_c > > qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; > > - sc = qs->connection; > - 
> - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { > + if (ngx_quic_control_flow(qs, f->final_size) != NGX_OK) { > return NGX_ERROR; > } > > - if (qs->final_size != (uint64_t) -1 && qs->final_size != f->final_size) { > + if (qs->recv_final_size != (uint64_t) -1 > + && qs->recv_final_size != f->final_size) > + { > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > return NGX_ERROR; > } > @@ -1287,12 +1346,16 @@ ngx_quic_handle_reset_stream_frame(ngx_c > return NGX_ERROR; > } > > - qs->final_size = f->final_size; > + qs->recv_final_size = f->final_size; > > - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { > + if (ngx_quic_update_flow(qs, qs->recv_final_size) != NGX_OK) { > return NGX_ERROR; > } > > + if (qs->connection == NULL) { > + return ngx_quic_close_stream(qs); > + } > + > ngx_quic_set_event(qs->connection->read); > > return NGX_OK; > @@ -1325,10 +1388,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c > return NGX_OK; > } > > - if (ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { > + if (ngx_quic_do_reset_stream(qs, f->error_code) != NGX_OK) { > return NGX_ERROR; > } > > + if (qs->connection == NULL) { > + return ngx_quic_close_stream(qs); > + } > + > ngx_quic_set_event(qs->connection->write); > > return NGX_OK; > @@ -1378,30 +1445,37 @@ ngx_quic_handle_stream_ack(ngx_connectio > return; > } > > + if (qs->connection == NULL) { > + qs->acked += f->u.stream.length; > + return; > + } > + > sent = qs->connection->sent; > unacked = sent - qs->acked; > + qs->acked += f->u.stream.length; > > - if (unacked >= qc->conf->stream_buffer_size) { > - ngx_quic_set_event(qs->connection->write); > + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic stream id:0x%xL ack len:%uL acked:%uL unacked:%uL", > + qs->id, f->u.stream.length, qs->acked, sent - qs->acked); > + > + if (unacked != qc->conf->stream_buffer_size) { > + /* not blocked on buffer size */ > + return; > } > > - qs->acked += f->u.stream.length; > - > - 
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, > - "quic stream ack len:%uL acked:%uL unacked:%uL", > - f->u.stream.length, qs->acked, sent - qs->acked); > + ngx_quic_set_event(qs->connection->write); > } > > > static ngx_int_t > -ngx_quic_control_flow(ngx_connection_t *c, uint64_t last) > +ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last) > { > uint64_t len; > - ngx_quic_stream_t *qs; > + ngx_connection_t *pc; > ngx_quic_connection_t *qc; > > - qs = c->quic; > - qc = ngx_quic_get_connection(qs->parent); > + pc = qs->parent; > + qc = ngx_quic_get_connection(pc); > > if (last <= qs->recv_last) { > return NGX_OK; > @@ -1409,9 +1483,9 @@ ngx_quic_control_flow(ngx_connection_t * > > len = last - qs->recv_last; > > - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic flow control msd:%uL/%uL md:%uL/%uL", > - last, qs->recv_max_data, qc->streams.recv_last + len, > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL flow control msd:%uL/%uL md:%uL/%uL", > + qs->id, last, qs->recv_max_data, qc->streams.recv_last + len, > qc->streams.recv_max_data); > > qs->recv_last += len; > @@ -1435,14 +1509,12 @@ ngx_quic_control_flow(ngx_connection_t * > > > static ngx_int_t > -ngx_quic_update_flow(ngx_connection_t *c, uint64_t last) > +ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last) > { > uint64_t len; > ngx_connection_t *pc; > - ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > pc = qs->parent; > qc = ngx_quic_get_connection(pc); > > @@ -1452,13 +1524,13 @@ ngx_quic_update_flow(ngx_connection_t *c > > len = last - qs->recv_offset; > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic flow update %uL", last); > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL flow update %uL", qs->id, last); > > qs->recv_offset += len; > > if (qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { > - if (ngx_quic_update_max_stream_data(c) != NGX_OK) { > + if 
(ngx_quic_update_max_stream_data(qs) != NGX_OK) { > return NGX_ERROR; > } > } > @@ -1478,15 +1550,13 @@ ngx_quic_update_flow(ngx_connection_t *c > > > static ngx_int_t > -ngx_quic_update_max_stream_data(ngx_connection_t *c) > +ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs) > { > uint64_t recv_max_data; > ngx_connection_t *pc; > ngx_quic_frame_t *frame; > - ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > pc = qs->parent; > qc = ngx_quic_get_connection(pc); > > @@ -1502,8 +1572,9 @@ ngx_quic_update_max_stream_data(ngx_conn > > qs->recv_max_data = recv_max_data; > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic flow update msd:%uL", qs->recv_max_data); > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > + "quic stream id:0x%xL flow update msd:%uL", > + qs->id, qs->recv_max_data); > > frame = ngx_quic_alloc_frame(pc); > if (frame == NULL) { > diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c > --- a/src/http/v3/ngx_http_v3_uni.c > +++ b/src/http/v3/ngx_http_v3_uni.c > @@ -295,8 +295,6 @@ ngx_http_v3_uni_dummy_write_handler(ngx_ > } > > > -/* XXX async & buffered stream writes */ > - > ngx_connection_t * > ngx_http_v3_create_push_stream(ngx_connection_t *c, uint64_t push_id) > { > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From arut at nginx.com Tue Feb 8 12:18:13 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 8 Feb 2022 15:18:13 +0300 Subject: [PATCH] QUIC: stream lingering In-Reply-To: References: <20220207141617.av7ft5sycecoce6r@Romans-MacBook-Pro.local> Message-ID: <20220208121813.kamvpy53rxedadn7@Romans-MacBook-Pro.local> On Tue, Feb 08, 2022 at 02:45:19PM +0300, Vladimir Homutov wrote: > On Mon, Feb 07, 2022 at 05:16:17PM +0300, Roman Arutyunyan wrote: > > Hi, > > > > On Fri, Feb 04, 2022 at 04:56:23PM +0300, Vladimir Homutov wrote: > > > On Tue, Feb 01, 
2022 at 04:39:59PM +0300, Roman Arutyunyan wrote: > > > > # HG changeset patch > > > > # User Roman Arutyunyan > > > > # Date 1643722727 -10800 > > > > # Tue Feb 01 16:38:47 2022 +0300 > > > > # Branch quic > > > > # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 > > > > # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 > > > > QUIC: stream lingering. > > > > > > > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > > > > can persist after connection is closed by application. During this period, > > > > server is expecting stream final size from client for correct flow control. > > > > Also, buffered output is sent to client as more flow control credit is granted. > > > > > > > [..] > > > > > > > +static ngx_int_t > > > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > > > +{ > > > > + size_t limit, len; > > > > + ngx_uint_t last; > > > > + ngx_chain_t *out, *cl; > > > > + ngx_quic_frame_t *frame; > > > > + ngx_connection_t *pc; > > > > + ngx_quic_connection_t *qc; > > > > + > > > > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > > > > + return NGX_OK; > > > > + } > > > > + > > > > + pc = qs->parent; > > > > + qc = ngx_quic_get_connection(pc); > > > > + > > > > + limit = ngx_quic_max_stream_flow(qs); > > > > + last = 0; > > > > + > > > > + out = ngx_quic_read_chain(pc, &qs->out, limit); > > > > + if (out == NGX_CHAIN_ERROR) { > > > > + return NGX_ERROR; > > > > + } > > > > + > > > > + len = 0; > > > > + last = 0; > > > > > > this assignment looks duplicate. > > > > Thanks, fixed. > > > > > [..] 
> > > > > > > +static ngx_int_t > > > > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > > > > +{ > > > > ngx_connection_t *pc; > > > > ngx_quic_frame_t *frame; > > > > - ngx_quic_stream_t *qs; > > > > ngx_quic_connection_t *qc; > > > > > > > > - qs = c->quic; > > > > pc = qs->parent; > > > > qc = ngx_quic_get_connection(pc); > > > > > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > > - "quic stream id:0x%xL cleanup", qs->id); > > > > + if (!qc->closing) { > > > > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > > > > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > > > > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > > > > + { > > > > > > so basically this are the states where we need to wait for FIN? > > > and thus avoid closing till we get it. > > > I would add a comment here. > > > > On the receiving end we wait either for fin or for reset to have final size. > > On the sending end we wait for everything that's buffered to be sent. > > Added a comment about that. > > > > > [..] > > > > + if (qs->connection == NULL) { > > > > + return ngx_quic_close_stream(qs); > > > > + } > > > > + > > > > ngx_quic_set_event(qs->connection->write); > > > > > > this pattern - check connection, close if NULL and set event seem to > > > repeat. Maybe it's worth to try to put this check/action into > > > ngx_quic_set_event somehow ? we could instead have > > > set_read_event/set_write_event maybe. > > > > I thought about this too, but it's not always that simple. And even if it was, > > the new function/macro would have unclear semantics. Let's just remember this > > as a possible future optimiation. > > > > > > +static ngx_int_t > > > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > > > + > > > [..] 
> > > > + if (len == 0 && !last) { > > > > + return NGX_OK; > > > > + } > > > > + > > > > + frame = ngx_quic_alloc_frame(pc); > > > > + if (frame == NULL) { > > > > + return NGX_ERROR; > > > > + } > > > > + > > > > + frame = ngx_quic_alloc_frame(pc); > > > > + if (frame == NULL) { > > > > + return NGX_ERROR; > > > > + } > > > > > > one more dup here. > > > > Yes, thanks. > > > > > Overal, it looks good, but the testing revealed another issue: with big > > > buffer sizes we run into issue of too long chains in ngx_quic_write_chain(). > > > As discussed, this certainly needs optimization - probably adding some > > > pointer to the end to facilitate appending, or something else. > > > > It's true ngx_quic_write_chain() needs to be optimized. When the buffered > > chain is big, it takes too much time to find the write point. I'll address > > this is a separate patch. Meanwhile, attached is an updated version of the > > current one. > > > > In the new version of the patch I also eliminated the > > ngx_quic_max_stream_flow() function and embedded its content in > > ngx_quic_stream_flush(). > > yes, this looks correct - flow limit should not consider buffer as it > was before. > > I think we should check for limit == 0 before doing read_chain and this > is good place for debug logging about 'hit MAX_DATA/MAX_STREAM_DATA' that > was removed by update. I don't know how much do we really need those messages. What really needs to be added here is sending DATA_BLOCKED/STREAM_DATA_BLOCKED, for which I already have a separate patch. That patch also adds some logging. Once we finish with optimization, I'll send it out. Apart from logging, checking limit == 0 does not seem to make sense, because even if the limit is zero, we should still proceed, since we are still able to send fin. 
> > -- > > Roman Arutyunyan > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1644054894 -10800 > > # Sat Feb 05 12:54:54 2022 +0300 > > # Branch quic > > # Node ID 6e1674c257709341a7508ae4bdab6f7f7d2e9284 > > # Parent 6c1dfd072859022f830aeea49db7cbe3c9f7fb55 > > QUIC: stream lingering. > > > > Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it > > can persist after connection is closed by application. During this period, > > server is expecting stream final size from client for correct flow control. > > Also, buffered output is sent to client as more flow control credit is granted. > > > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > > --- a/src/event/quic/ngx_event_quic.c > > +++ b/src/event/quic/ngx_event_quic.c > > @@ -303,6 +303,7 @@ ngx_quic_new_connection(ngx_connection_t > > ctp->active_connection_id_limit = 2; > > > > ngx_queue_init(&qc->streams.uninitialized); > > + ngx_queue_init(&qc->streams.free); > > > > qc->streams.recv_max_data = qc->tp.initial_max_data; > > qc->streams.recv_window = qc->streams.recv_max_data; > > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > > --- a/src/event/quic/ngx_event_quic.h > > +++ b/src/event/quic/ngx_event_quic.h > > @@ -78,12 +78,14 @@ struct ngx_quic_stream_s { > > uint64_t id; > > uint64_t acked; > > uint64_t send_max_data; > > + uint64_t send_offset; > > + uint64_t send_final_size; > > uint64_t recv_max_data; > > uint64_t recv_offset; > > uint64_t recv_window; > > uint64_t recv_last; > > uint64_t recv_size; > > - uint64_t final_size; > > + uint64_t recv_final_size; > > ngx_chain_t *in; > > ngx_chain_t *out; > > ngx_uint_t cancelable; /* unsigned cancelable:1; */ > > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > > --- a/src/event/quic/ngx_event_quic_connection.h > > +++ b/src/event/quic/ngx_event_quic_connection.h > > @@ -114,13 +114,16 @@ struct 
ngx_quic_socket_s { > > typedef struct { > > ngx_rbtree_t tree; > > ngx_rbtree_node_t sentinel; > > + > > ngx_queue_t uninitialized; > > + ngx_queue_t free; > > > > uint64_t sent; > > uint64_t recv_offset; > > uint64_t recv_window; > > uint64_t recv_last; > > uint64_t recv_max_data; > > + uint64_t send_offset; > > uint64_t send_max_data; > > > > uint64_t server_max_streams_uni; > > diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c > > --- a/src/event/quic/ngx_event_quic_frames.c > > +++ b/src/event/quic/ngx_event_quic_frames.c > > @@ -391,6 +391,10 @@ ngx_quic_split_frame(ngx_connection_t *c > > return NGX_ERROR; > > } > > > > + if (f->type == NGX_QUIC_FT_STREAM) { > > + f->u.stream.fin = 0; > > + } > > + > > ngx_queue_insert_after(&f->queue, &nf->queue); > > > > return NGX_OK; > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > --- a/src/event/quic/ngx_event_quic_streams.c > > +++ b/src/event/quic/ngx_event_quic_streams.c > > @@ -13,6 +13,8 @@ > > #define NGX_QUIC_STREAM_GONE (void *) -1 > > > > > > +static ngx_int_t ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, > > + ngx_uint_t err); > > static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); > > static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); > > static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); > > @@ -28,11 +30,12 @@ static ssize_t ngx_quic_stream_send(ngx_ > > size_t size); > > static ngx_chain_t *ngx_quic_stream_send_chain(ngx_connection_t *c, > > ngx_chain_t *in, off_t limit); > > -static size_t ngx_quic_max_stream_flow(ngx_connection_t *c); > > +static ngx_int_t ngx_quic_stream_flush(ngx_quic_stream_t *qs); > > static void ngx_quic_stream_cleanup_handler(void *data); > > -static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > > -static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > > -static ngx_int_t 
ngx_quic_update_max_stream_data(ngx_connection_t *c); > > +static ngx_int_t ngx_quic_close_stream(ngx_quic_stream_t *qs); > > +static ngx_int_t ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last); > > +static ngx_int_t ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last); > > +static ngx_int_t ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs); > > static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); > > static void ngx_quic_set_event(ngx_event_t *ev); > > > > @@ -186,15 +189,20 @@ ngx_quic_close_streams(ngx_connection_t > > ns = 0; > > #endif > > > > - for (node = ngx_rbtree_min(tree->root, tree->sentinel); > > - node; > > - node = ngx_rbtree_next(tree, node)) > > - { > > + node = ngx_rbtree_min(tree->root, tree->sentinel); > > + > > + while (node) { > > qs = (ngx_quic_stream_t *) node; > > + node = ngx_rbtree_next(tree, node); > > > > qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; > > qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; > > > > + if (qs->connection == NULL) { > > + ngx_quic_close_stream(qs); > > + continue; > > + } > > + > > ngx_quic_set_event(qs->connection->read); > > ngx_quic_set_event(qs->connection->write); > > > > @@ -213,13 +221,17 @@ ngx_quic_close_streams(ngx_connection_t > > ngx_int_t > > ngx_quic_reset_stream(ngx_connection_t *c, ngx_uint_t err) > > { > > + return ngx_quic_do_reset_stream(c->quic, err); > > +} > > + > > + > > +static ngx_int_t > > +ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, ngx_uint_t err) > > +{ > > ngx_connection_t *pc; > > ngx_quic_frame_t *frame; > > - ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > - > > if (qs->send_state == NGX_QUIC_STREAM_SEND_DATA_RECVD > > || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_SENT > > || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_RECVD) > > @@ -228,10 +240,14 @@ ngx_quic_reset_stream(ngx_connection_t * > > } > > > > qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; > > + qs->send_final_size = qs->send_offset; > 
> > > pc = qs->parent; > > qc = ngx_quic_get_connection(pc); > > > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL reset", qs->id); > > + > > frame = ngx_quic_alloc_frame(pc); > > if (frame == NULL) { > > return NGX_ERROR; > > @@ -241,10 +257,13 @@ ngx_quic_reset_stream(ngx_connection_t * > > frame->type = NGX_QUIC_FT_RESET_STREAM; > > frame->u.reset_stream.id = qs->id; > > frame->u.reset_stream.error_code = err; > > - frame->u.reset_stream.final_size = c->sent; > > + frame->u.reset_stream.final_size = qs->send_offset; > > > > ngx_quic_queue_frame(qc, frame); > > > > + ngx_quic_free_chain(pc, qs->out); > > + qs->out = NULL; > > + > > return NGX_OK; > > } > > > > @@ -271,10 +290,7 @@ ngx_quic_shutdown_stream(ngx_connection_ > > static ngx_int_t > > ngx_quic_shutdown_stream_send(ngx_connection_t *c) > > { > > - ngx_connection_t *pc; > > - ngx_quic_frame_t *frame; > > - ngx_quic_stream_t *qs; > > - ngx_quic_connection_t *qc; > > + ngx_quic_stream_t *qs; > > > > qs = c->quic; > > > > @@ -284,32 +300,13 @@ ngx_quic_shutdown_stream_send(ngx_connec > > return NGX_OK; > > } > > > > - qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; > > - > > - pc = qs->parent; > > - qc = ngx_quic_get_connection(pc); > > + qs->send_state = NGX_QUIC_STREAM_SEND_SEND; > > + qs->send_final_size = c->sent; > > > > - frame = ngx_quic_alloc_frame(pc); > > - if (frame == NULL) { > > - return NGX_ERROR; > > - } > > - > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, > > "quic stream id:0x%xL send shutdown", qs->id); > > > > - frame->level = ssl_encryption_application; > > - frame->type = NGX_QUIC_FT_STREAM; > > - frame->u.stream.off = 1; > > - frame->u.stream.len = 1; > > - frame->u.stream.fin = 1; > > - > > - frame->u.stream.stream_id = qs->id; > > - frame->u.stream.offset = c->sent; > > - frame->u.stream.length = 0; > > - > > - ngx_quic_queue_frame(qc, frame); > > - > > - return NGX_OK; > > + return 
ngx_quic_stream_flush(qs); > > } > > > > > > @@ -341,7 +338,7 @@ ngx_quic_shutdown_stream_recv(ngx_connec > > return NGX_ERROR; > > } > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > "quic stream id:0x%xL recv shutdown", qs->id); > > > > frame->level = ssl_encryption_application; > > @@ -591,6 +588,7 @@ ngx_quic_create_stream(ngx_connection_t > > { > > ngx_log_t *log; > > ngx_pool_t *pool; > > + ngx_queue_t *q; > > ngx_connection_t *sc; > > ngx_quic_stream_t *qs; > > ngx_pool_cleanup_t *cln; > > @@ -601,25 +599,41 @@ ngx_quic_create_stream(ngx_connection_t > > > > qc = ngx_quic_get_connection(c); > > > > - pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); > > - if (pool == NULL) { > > - return NULL; > > + if (!ngx_queue_empty(&qc->streams.free)) { > > + q = ngx_queue_head(&qc->streams.free); > > + qs = ngx_queue_data(q, ngx_quic_stream_t, queue); > > + ngx_queue_remove(&qs->queue); > > + > > + } else { > > + /* > > + * the number of streams is limited by transport > > + * parameters and application requirements > > + */ > > + > > + qs = ngx_palloc(c->pool, sizeof(ngx_quic_stream_t)); > > + if (qs == NULL) { > > + return NULL; > > + } > > } > > > > - qs = ngx_pcalloc(pool, sizeof(ngx_quic_stream_t)); > > - if (qs == NULL) { > > - ngx_destroy_pool(pool); > > - return NULL; > > - } > > + ngx_memzero(qs, sizeof(ngx_quic_stream_t)); > > > > qs->node.key = id; > > qs->parent = c; > > qs->id = id; > > - qs->final_size = (uint64_t) -1; > > + qs->send_final_size = (uint64_t) -1; > > + qs->recv_final_size = (uint64_t) -1; > > + > > + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); > > + if (pool == NULL) { > > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > + return NULL; > > + } > > > > log = ngx_palloc(pool, sizeof(ngx_log_t)); > > if (log == NULL) { > > ngx_destroy_pool(pool); > > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > return NULL; > > } > > > > @@ -629,6 +643,7 
@@ ngx_quic_create_stream(ngx_connection_t > > sc = ngx_get_connection(c->fd, log); > > if (sc == NULL) { > > ngx_destroy_pool(pool); > > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > return NULL; > > } > > > > @@ -697,6 +712,7 @@ ngx_quic_create_stream(ngx_connection_t > > if (cln == NULL) { > > ngx_close_connection(sc); > > ngx_destroy_pool(pool); > > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > return NULL; > > } > > > > @@ -737,7 +753,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > > return NGX_ERROR; > > } > > > > - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > "quic stream id:0x%xL recv buf:%uz", qs->id, size); > > > > if (size == 0) { > > @@ -763,7 +779,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > > rev->ready = 0; > > > > if (qs->recv_state == NGX_QUIC_STREAM_RECV_DATA_RECVD > > - && qs->recv_offset == qs->final_size) > > + && qs->recv_offset == qs->recv_final_size) > > { > > qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_READ; > > } > > @@ -781,7 +797,7 @@ ngx_quic_stream_recv(ngx_connection_t *c > > ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > > "quic stream id:0x%xL recv len:%z", qs->id, len); > > > > - if (ngx_quic_update_flow(c, qs->recv_offset + len) != NGX_OK) { > > + if (ngx_quic_update_flow(qs, qs->recv_offset + len) != NGX_OK) { > > return NGX_ERROR; > > } > > > > @@ -822,9 +838,7 @@ ngx_quic_stream_send_chain(ngx_connectio > > off_t flow; > > size_t n; > > ngx_event_t *wev; > > - ngx_chain_t *out; > > ngx_connection_t *pc; > > - ngx_quic_frame_t *frame; > > ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > @@ -842,7 +856,8 @@ ngx_quic_stream_send_chain(ngx_connectio > > > > qs->send_state = NGX_QUIC_STREAM_SEND_SEND; > > > > - flow = ngx_quic_max_stream_flow(c); > > + flow = qs->acked + qc->conf->stream_buffer_size - c->sent; > > + > > if (flow == 0) { > > wev->ready = 0; > > return in; > > @@ -852,37 +867,15 @@ 
ngx_quic_stream_send_chain(ngx_connectio > > limit = flow; > > } > > > > - in = ngx_quic_write_chain(pc, &qs->out, in, limit, 0, &n); > > + in = ngx_quic_write_chain(pc, &qs->out, in, limit, > > + c->sent - qs->send_offset, &n); > > if (in == NGX_CHAIN_ERROR) { > > return NGX_CHAIN_ERROR; > > } > > > > - out = ngx_quic_read_chain(pc, &qs->out, n); > > - if (out == NGX_CHAIN_ERROR) { > > - return NGX_CHAIN_ERROR; > > - } > > - > > - frame = ngx_quic_alloc_frame(pc); > > - if (frame == NULL) { > > - return NGX_CHAIN_ERROR; > > - } > > - > > - frame->level = ssl_encryption_application; > > - frame->type = NGX_QUIC_FT_STREAM; > > - frame->data = out; > > - frame->u.stream.off = 1; > > - frame->u.stream.len = 1; > > - frame->u.stream.fin = 0; > > - > > - frame->u.stream.stream_id = qs->id; > > - frame->u.stream.offset = c->sent; > > - frame->u.stream.length = n; > > - > > c->sent += n; > > qc->streams.sent += n; > > > > - ngx_quic_queue_frame(qc, frame); > > - > > if (in) { > > wev->ready = 0; > > } > > @@ -890,61 +883,96 @@ ngx_quic_stream_send_chain(ngx_connectio > > ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > "quic send_chain sent:%uz", n); > > > > + if (ngx_quic_stream_flush(qs) != NGX_OK) { > > + return NGX_CHAIN_ERROR; > > + } > > + > > return in; > > } > > > > > > -static size_t > > -ngx_quic_max_stream_flow(ngx_connection_t *c) > > +static ngx_int_t > > +ngx_quic_stream_flush(ngx_quic_stream_t *qs) > > { > > - size_t size; > > - uint64_t sent, unacked; > > - ngx_quic_stream_t *qs; > > + off_t limit; > > + size_t len; > > + ngx_uint_t last; > > + ngx_chain_t *out, *cl; > > + ngx_quic_frame_t *frame; > > + ngx_connection_t *pc; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > - qc = ngx_quic_get_connection(qs->parent); > > + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { > > + return NGX_OK; > > + } > > > > - size = qc->conf->stream_buffer_size; > > - sent = c->sent; > > - unacked = sent - qs->acked; > > + pc = qs->parent; > > + qc = 
ngx_quic_get_connection(pc); > > > > if (qc->streams.send_max_data == 0) { > > qc->streams.send_max_data = qc->ctp.initial_max_data; > > } > > > > - if (unacked >= size) { > > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic send flow hit buffer size"); > > - return 0; > > + limit = ngx_min(qc->streams.send_max_data - qc->streams.send_offset, > > + qs->send_max_data - qs->send_offset); > > + > > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL flush limit:%O", qs->id, limit); > > + > > + out = ngx_quic_read_chain(pc, &qs->out, limit); > > + if (out == NGX_CHAIN_ERROR) { > > + return NGX_ERROR; > > } > > > > - size -= unacked; > > + len = 0; > > + last = 0; > > + > > + for (cl = out; cl; cl = cl->next) { > > + len += cl->buf->last - cl->buf->pos; > > + } > > > > - if (qc->streams.sent >= qc->streams.send_max_data) { > > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic send flow hit MAX_DATA"); > > - return 0; > > + if (qs->send_final_size != (uint64_t) -1 > > + && qs->send_final_size == qs->send_offset + len) > > + { > > + qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; > > + last = 1; > > + } > > + > > + if (len == 0 && !last) { > > + return NGX_OK; > > } > > > > - if (qc->streams.sent + size > qc->streams.send_max_data) { > > - size = qc->streams.send_max_data - qc->streams.sent; > > + frame = ngx_quic_alloc_frame(pc); > > + if (frame == NULL) { > > + return NGX_ERROR; > > } > > > > - if (sent >= qs->send_max_data) { > > - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic send flow hit MAX_STREAM_DATA"); > > - return 0; > > + frame->level = ssl_encryption_application; > > + frame->type = NGX_QUIC_FT_STREAM; > > + frame->data = out; > > + > > + frame->u.stream.off = 1; > > + frame->u.stream.len = 1; > > + frame->u.stream.fin = last; > > + > > + frame->u.stream.stream_id = qs->id; > > + frame->u.stream.offset = qs->send_offset; > > + frame->u.stream.length = len; > > + > > + ngx_quic_queue_frame(qc, 
frame); > > + > > + qs->send_offset += len; > > + qc->streams.send_offset += len; > > + > > + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL flush len:%uz last:%ui", > > + qs->id, len, last); > > + > > + if (qs->connection == NULL) { > > + return ngx_quic_close_stream(qs); > > } > > > > - if (sent + size > qs->send_max_data) { > > - size = qs->send_max_data - sent; > > - } > > - > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic send flow:%uz", size); > > - > > - return size; > > + return NGX_OK; > > } > > > > > > @@ -953,40 +981,67 @@ ngx_quic_stream_cleanup_handler(void *da > > { > > ngx_connection_t *c = data; > > > > + ngx_quic_stream_t *qs; > > + > > + qs = c->quic; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, > > + "quic stream id:0x%xL cleanup", qs->id); > > + > > + if (ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN) != NGX_OK) { > > + ngx_quic_close_connection(c, NGX_ERROR); > > + return; > > + } > > + > > + qs->connection = NULL; > > + > > + if (ngx_quic_close_stream(qs) != NGX_OK) { > > + ngx_quic_close_connection(c, NGX_ERROR); > > + return; > > + } > > +} > > + > > + > > +static ngx_int_t > > +ngx_quic_close_stream(ngx_quic_stream_t *qs) > > +{ > > ngx_connection_t *pc; > > ngx_quic_frame_t *frame; > > - ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > pc = qs->parent; > > qc = ngx_quic_get_connection(pc); > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic stream id:0x%xL cleanup", qs->id); > > + if (!qc->closing) { > > + /* make sure everything is sent and final size is received */ > > + > > + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV > > + || qs->send_state == NGX_QUIC_STREAM_SEND_READY > > + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) > > + { > > + return NGX_OK; > > + } > > + } > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL close", qs->id); > > + > > + ngx_quic_free_chain(pc, qs->in); 
> > + ngx_quic_free_chain(pc, qs->out); > > > > ngx_rbtree_delete(&qc->streams.tree, &qs->node); > > - ngx_quic_free_chain(pc, qs->in); > > - ngx_quic_free_chain(pc, qs->out); > > + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); > > > > if (qc->closing) { > > /* schedule handler call to continue ngx_quic_close_connection() */ > > ngx_post_event(pc->read, &ngx_posted_events); > > - return; > > + return NGX_OK; > > } > > > > - if (qc->error) { > > - goto done; > > - } > > - > > - (void) ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN); > > - > > - (void) ngx_quic_update_flow(c, qs->recv_last); > > - > > if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { > > frame = ngx_quic_alloc_frame(pc); > > if (frame == NULL) { > > - goto done; > > + return NGX_ERROR; > > } > > > > frame->level = ssl_encryption_application; > > @@ -1004,13 +1059,11 @@ ngx_quic_stream_cleanup_handler(void *da > > ngx_quic_queue_frame(qc, frame); > > } > > > > -done: > > - > > - (void) ngx_quic_output(pc); > > - > > if (qc->shutdown) { > > ngx_post_event(pc->read, &ngx_posted_events); > > } > > + > > + return NGX_OK; > > } > > > > > > @@ -1020,7 +1073,6 @@ ngx_quic_handle_stream_frame(ngx_connect > > { > > size_t size; > > uint64_t last; > > - ngx_connection_t *sc; > > ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > ngx_quic_stream_frame_t *f; > > @@ -1048,19 +1100,17 @@ ngx_quic_handle_stream_frame(ngx_connect > > return NGX_OK; > > } > > > > - sc = qs->connection; > > - > > if (qs->recv_state != NGX_QUIC_STREAM_RECV_RECV > > && qs->recv_state != NGX_QUIC_STREAM_RECV_SIZE_KNOWN) > > { > > return NGX_OK; > > } > > > > - if (ngx_quic_control_flow(sc, last) != NGX_OK) { > > + if (ngx_quic_control_flow(qs, last) != NGX_OK) { > > return NGX_ERROR; > > } > > > > - if (qs->final_size != (uint64_t) -1 && last > qs->final_size) { > > + if (qs->recv_final_size != (uint64_t) -1 && last > qs->recv_final_size) { > > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > > return NGX_ERROR; > > } > > 
@@ -1075,7 +1125,8 @@ ngx_quic_handle_stream_frame(ngx_connect > > } > > > > if (f->fin) { > > - if (qs->final_size != (uint64_t) -1 && qs->final_size != last) { > > + if (qs->recv_final_size != (uint64_t) -1 && qs->recv_final_size != last) > > + { > > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > > return NGX_ERROR; > > } > > @@ -1085,7 +1136,7 @@ ngx_quic_handle_stream_frame(ngx_connect > > return NGX_ERROR; > > } > > > > - qs->final_size = last; > > + qs->recv_final_size = last; > > qs->recv_state = NGX_QUIC_STREAM_RECV_SIZE_KNOWN; > > } > > > > @@ -1099,13 +1150,17 @@ ngx_quic_handle_stream_frame(ngx_connect > > qs->recv_size += size; > > > > if (qs->recv_state == NGX_QUIC_STREAM_RECV_SIZE_KNOWN > > - && qs->recv_size == qs->final_size) > > + && qs->recv_size == qs->recv_final_size) > > { > > qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_RECVD; > > } > > > > + if (qs->connection == NULL) { > > + return ngx_quic_close_stream(qs); > > + } > > + > > if (f->offset == qs->recv_offset) { > > - ngx_quic_set_event(sc->read); > > + ngx_quic_set_event(qs->connection->read); > > } > > > > return NGX_OK; > > @@ -1128,20 +1183,26 @@ ngx_quic_handle_max_data_frame(ngx_conne > > return NGX_OK; > > } > > > > - if (tree->root != tree->sentinel > > - && qc->streams.sent >= qc->streams.send_max_data) > > + if (tree->root == tree->sentinel > > + || qc->streams.send_offset < qc->streams.send_max_data) > > { > > - > > - for (node = ngx_rbtree_min(tree->root, tree->sentinel); > > - node; > > - node = ngx_rbtree_next(tree, node)) > > - { > > - qs = (ngx_quic_stream_t *) node; > > - ngx_quic_set_event(qs->connection->write); > > - } > > + /* not blocked on MAX_DATA */ > > + qc->streams.send_max_data = f->max_data; > > + return NGX_OK; > > } > > > > qc->streams.send_max_data = f->max_data; > > + node = ngx_rbtree_min(tree->root, tree->sentinel); > > + > > + while (node && qc->streams.send_offset < qc->streams.send_max_data) { > > + > > + qs = (ngx_quic_stream_t *) node; > > + node = 
ngx_rbtree_next(tree, node); > > + > > + if (ngx_quic_stream_flush(qs) != NGX_OK) { > > + return NGX_ERROR; > > + } > > + } > > > > return NGX_OK; > > } > > @@ -1189,7 +1250,7 @@ ngx_quic_handle_stream_data_blocked_fram > > return NGX_OK; > > } > > > > - return ngx_quic_update_max_stream_data(qs->connection); > > + return ngx_quic_update_max_stream_data(qs); > > } > > > > > > @@ -1197,7 +1258,6 @@ ngx_int_t > > ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_max_stream_data_frame_t *f) > > { > > - uint64_t sent; > > ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > @@ -1224,15 +1284,15 @@ ngx_quic_handle_max_stream_data_frame(ng > > return NGX_OK; > > } > > > > - sent = qs->connection->sent; > > - > > - if (sent >= qs->send_max_data) { > > - ngx_quic_set_event(qs->connection->write); > > + if (qs->send_offset < qs->send_max_data) { > > + /* not blocked on MAX_STREAM_DATA */ > > + qs->send_max_data = f->limit; > > + return NGX_OK; > > } > > > > qs->send_max_data = f->limit; > > > > - return NGX_OK; > > + return ngx_quic_stream_flush(qs); > > } > > > > > > @@ -1240,7 +1300,6 @@ ngx_int_t > > ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) > > { > > - ngx_connection_t *sc; > > ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > @@ -1271,13 +1330,13 @@ ngx_quic_handle_reset_stream_frame(ngx_c > > > > qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; > > > > - sc = qs->connection; > > - > > - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { > > + if (ngx_quic_control_flow(qs, f->final_size) != NGX_OK) { > > return NGX_ERROR; > > } > > > > - if (qs->final_size != (uint64_t) -1 && qs->final_size != f->final_size) { > > + if (qs->recv_final_size != (uint64_t) -1 > > + && qs->recv_final_size != f->final_size) > > + { > > qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; > > return NGX_ERROR; > > } > > @@ -1287,12 +1346,16 @@ 
ngx_quic_handle_reset_stream_frame(ngx_c > > return NGX_ERROR; > > } > > > > - qs->final_size = f->final_size; > > + qs->recv_final_size = f->final_size; > > > > - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { > > + if (ngx_quic_update_flow(qs, qs->recv_final_size) != NGX_OK) { > > return NGX_ERROR; > > } > > > > + if (qs->connection == NULL) { > > + return ngx_quic_close_stream(qs); > > + } > > + > > ngx_quic_set_event(qs->connection->read); > > > > return NGX_OK; > > @@ -1325,10 +1388,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c > > return NGX_OK; > > } > > > > - if (ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { > > + if (ngx_quic_do_reset_stream(qs, f->error_code) != NGX_OK) { > > return NGX_ERROR; > > } > > > > + if (qs->connection == NULL) { > > + return ngx_quic_close_stream(qs); > > + } > > + > > ngx_quic_set_event(qs->connection->write); > > > > return NGX_OK; > > @@ -1378,30 +1445,37 @@ ngx_quic_handle_stream_ack(ngx_connectio > > return; > > } > > > > + if (qs->connection == NULL) { > > + qs->acked += f->u.stream.length; > > + return; > > + } > > + > > sent = qs->connection->sent; > > unacked = sent - qs->acked; > > + qs->acked += f->u.stream.length; > > > > - if (unacked >= qc->conf->stream_buffer_size) { > > - ngx_quic_set_event(qs->connection->write); > > + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + "quic stream id:0x%xL ack len:%uL acked:%uL unacked:%uL", > > + qs->id, f->u.stream.length, qs->acked, sent - qs->acked); > > + > > + if (unacked != qc->conf->stream_buffer_size) { > > + /* not blocked on buffer size */ > > + return; > > } > > > > - qs->acked += f->u.stream.length; > > - > > - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, > > - "quic stream ack len:%uL acked:%uL unacked:%uL", > > - f->u.stream.length, qs->acked, sent - qs->acked); > > + ngx_quic_set_event(qs->connection->write); > > } > > > > > > static ngx_int_t > > -ngx_quic_control_flow(ngx_connection_t *c, uint64_t last) > > 
+ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last) > > { > > uint64_t len; > > - ngx_quic_stream_t *qs; > > + ngx_connection_t *pc; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > - qc = ngx_quic_get_connection(qs->parent); > > + pc = qs->parent; > > + qc = ngx_quic_get_connection(pc); > > > > if (last <= qs->recv_last) { > > return NGX_OK; > > @@ -1409,9 +1483,9 @@ ngx_quic_control_flow(ngx_connection_t * > > > > len = last - qs->recv_last; > > > > - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic flow control msd:%uL/%uL md:%uL/%uL", > > - last, qs->recv_max_data, qc->streams.recv_last + len, > > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL flow control msd:%uL/%uL md:%uL/%uL", > > + qs->id, last, qs->recv_max_data, qc->streams.recv_last + len, > > qc->streams.recv_max_data); > > > > qs->recv_last += len; > > @@ -1435,14 +1509,12 @@ ngx_quic_control_flow(ngx_connection_t * > > > > > > static ngx_int_t > > -ngx_quic_update_flow(ngx_connection_t *c, uint64_t last) > > +ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last) > > { > > uint64_t len; > > ngx_connection_t *pc; > > - ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > pc = qs->parent; > > qc = ngx_quic_get_connection(pc); > > > > @@ -1452,13 +1524,13 @@ ngx_quic_update_flow(ngx_connection_t *c > > > > len = last - qs->recv_offset; > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic flow update %uL", last); > > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL flow update %uL", qs->id, last); > > > > qs->recv_offset += len; > > > > if (qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { > > - if (ngx_quic_update_max_stream_data(c) != NGX_OK) { > > + if (ngx_quic_update_max_stream_data(qs) != NGX_OK) { > > return NGX_ERROR; > > } > > } > > @@ -1478,15 +1550,13 @@ ngx_quic_update_flow(ngx_connection_t *c > > > > > > static ngx_int_t > > 
-ngx_quic_update_max_stream_data(ngx_connection_t *c) > > +ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs) > > { > > uint64_t recv_max_data; > > ngx_connection_t *pc; > > ngx_quic_frame_t *frame; > > - ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > pc = qs->parent; > > qc = ngx_quic_get_connection(pc); > > > > @@ -1502,8 +1572,9 @@ ngx_quic_update_max_stream_data(ngx_conn > > > > qs->recv_max_data = recv_max_data; > > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic flow update msd:%uL", qs->recv_max_data); > > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > + "quic stream id:0x%xL flow update msd:%uL", > > + qs->id, qs->recv_max_data); > > > > frame = ngx_quic_alloc_frame(pc); > > if (frame == NULL) { > > diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c > > --- a/src/http/v3/ngx_http_v3_uni.c > > +++ b/src/http/v3/ngx_http_v3_uni.c > > @@ -295,8 +295,6 @@ ngx_http_v3_uni_dummy_write_handler(ngx_ > > } > > > > > > -/* XXX async & buffered stream writes */ > > - > > ngx_connection_t * > > ngx_http_v3_create_push_stream(ngx_connection_t *c, uint64_t push_id) > > { > > > _______________________________________________ > > nginx-devel mailing list -- nginx-devel at nginx.org > > To unsubscribe send an email to nginx-devel-leave at nginx.org > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org -- Roman Arutyunyan From vl at nginx.com Tue Feb 8 12:23:35 2022 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 8 Feb 2022 15:23:35 +0300 Subject: [PATCH] QUIC: stream lingering In-Reply-To: <20220208121813.kamvpy53rxedadn7@Romans-MacBook-Pro.local> References: <20220207141617.av7ft5sycecoce6r@Romans-MacBook-Pro.local> <20220208121813.kamvpy53rxedadn7@Romans-MacBook-Pro.local> Message-ID: <4e69749f-cfd7-8ea6-67b0-f0b595434b82@nginx.com> On 2/8/22 15:18, Roman Arutyunyan 
wrote: > On Tue, Feb 08, 2022 at 02:45:19PM +0300, Vladimir Homutov wrote: >> On Mon, Feb 07, 2022 at 05:16:17PM +0300, Roman Arutyunyan wrote: >>> Hi, >>> >>> On Fri, Feb 04, 2022 at 04:56:23PM +0300, Vladimir Homutov wrote: >>>> On Tue, Feb 01, 2022 at 04:39:59PM +0300, Roman Arutyunyan wrote: >>>>> # HG changeset patch >>>>> # User Roman Arutyunyan >>>>> # Date 1643722727 -10800 >>>>> # Tue Feb 01 16:38:47 2022 +0300 >>>>> # Branch quic >>>>> # Node ID db31ae16c1f2050be9c9f6b1f117ab6725b97dd4 >>>>> # Parent 308ac307b3e6952ef0c5ccf10cc82904c59fa4c3 >>>>> QUIC: stream lingering. >>>>> >>>>> Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it >>>>> can persist after connection is closed by application. During this period, >>>>> server is expecting stream final size from client for correct flow control. >>>>> Also, buffered output is sent to client as more flow control credit is granted. >>>>> >>>> [..] >>>> >>>>> +static ngx_int_t >>>>> +ngx_quic_stream_flush(ngx_quic_stream_t *qs) >>>>> +{ >>>>> + size_t limit, len; >>>>> + ngx_uint_t last; >>>>> + ngx_chain_t *out, *cl; >>>>> + ngx_quic_frame_t *frame; >>>>> + ngx_connection_t *pc; >>>>> + ngx_quic_connection_t *qc; >>>>> + >>>>> + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { >>>>> + return NGX_OK; >>>>> + } >>>>> + >>>>> + pc = qs->parent; >>>>> + qc = ngx_quic_get_connection(pc); >>>>> + >>>>> + limit = ngx_quic_max_stream_flow(qs); >>>>> + last = 0; >>>>> + >>>>> + out = ngx_quic_read_chain(pc, &qs->out, limit); >>>>> + if (out == NGX_CHAIN_ERROR) { >>>>> + return NGX_ERROR; >>>>> + } >>>>> + >>>>> + len = 0; >>>>> + last = 0; >>>> >>>> this assignment looks duplicate. >>> >>> Thanks, fixed. >>> >>>> [..] 
>>>> >>>>> +static ngx_int_t >>>>> +ngx_quic_close_stream(ngx_quic_stream_t *qs) >>>>> +{ >>>>> ngx_connection_t *pc; >>>>> ngx_quic_frame_t *frame; >>>>> - ngx_quic_stream_t *qs; >>>>> ngx_quic_connection_t *qc; >>>>> >>>>> - qs = c->quic; >>>>> pc = qs->parent; >>>>> qc = ngx_quic_get_connection(pc); >>>>> >>>>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>>>> - "quic stream id:0x%xL cleanup", qs->id); >>>>> + if (!qc->closing) { >>>>> + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV >>>>> + || qs->send_state == NGX_QUIC_STREAM_SEND_READY >>>>> + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) >>>>> + { >>>> >>>> so basically this are the states where we need to wait for FIN? >>>> and thus avoid closing till we get it. >>>> I would add a comment here. >>> >>> On the receiving end we wait either for fin or for reset to have final size. >>> On the sending end we wait for everything that's buffered to be sent. >>> Added a comment about that. >>> >>>> [..] >>>>> + if (qs->connection == NULL) { >>>>> + return ngx_quic_close_stream(qs); >>>>> + } >>>>> + >>>>> ngx_quic_set_event(qs->connection->write); >>>> >>>> this pattern - check connection, close if NULL and set event seem to >>>> repeat. Maybe it's worth to try to put this check/action into >>>> ngx_quic_set_event somehow ? we could instead have >>>> set_read_event/set_write_event maybe. >>> >>> I thought about this too, but it's not always that simple. And even if it was, >>> the new function/macro would have unclear semantics. Let's just remember this >>> as a possible future optimiation. >>> >>>>> +static ngx_int_t >>>>> +ngx_quic_stream_flush(ngx_quic_stream_t *qs) >>>>> + >>>> [..] >>>>> + if (len == 0 && !last) { >>>>> + return NGX_OK; >>>>> + } >>>>> + >>>>> + frame = ngx_quic_alloc_frame(pc); >>>>> + if (frame == NULL) { >>>>> + return NGX_ERROR; >>>>> + } >>>>> + >>>>> + frame = ngx_quic_alloc_frame(pc); >>>>> + if (frame == NULL) { >>>>> + return NGX_ERROR; >>>>> + } >>>> >>>> one more dup here. 
>>> >>> Yes, thanks. >>> >>>> Overal, it looks good, but the testing revealed another issue: with big >>>> buffer sizes we run into issue of too long chains in ngx_quic_write_chain(). >>>> As discussed, this certainly needs optimization - probably adding some >>>> pointer to the end to facilitate appending, or something else. >>> >>> It's true ngx_quic_write_chain() needs to be optimized. When the buffered >>> chain is big, it takes too much time to find the write point. I'll address >>> this is a separate patch. Meanwhile, attached is an updated version of the >>> current one. >>> >>> In the new version of the patch I also eliminated the >>> ngx_quic_max_stream_flow() function and embedded its content in >>> ngx_quic_stream_flush(). >> >> yes, this looks correct - flow limit should not consider buffer as it >> was before. >> >> I think we should check for limit == 0 before doing read_chain and this >> is good place for debug logging about 'hit MAX_DATA/MAX_STREAM_DATA' that >> was removed by update. > > I don't know how much do we really need those messages. What really needs to > be added here is sending DATA_BLOCKED/STREAM_DATA_BLOCKED, for which I > already have a separate patch. That patch also adds some logging. > Once we finish with optimization, I'll send it out. ok, good. > > Apart from logging, checking limit == 0 does not seem to make sense, because > even if the limit is zero, we should still proceed, since we are still able to > send fin. yes, exactly. I have no more concerns regarding this patch, updated version looks good (considering further patches adressing related issues). > >>> -- >>> Roman Arutyunyan >> >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1644054894 -10800 >>> # Sat Feb 05 12:54:54 2022 +0300 >>> # Branch quic >>> # Node ID 6e1674c257709341a7508ae4bdab6f7f7d2e9284 >>> # Parent 6c1dfd072859022f830aeea49db7cbe3c9f7fb55 >>> QUIC: stream lingering. 
>>> >>> Now ngx_quic_stream_t is decoupled from ngx_connection_t in a way that it >>> can persist after connection is closed by application. During this period, >>> server is expecting stream final size from client for correct flow control. >>> Also, buffered output is sent to client as more flow control credit is granted. >>> >>> diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c >>> --- a/src/event/quic/ngx_event_quic.c >>> +++ b/src/event/quic/ngx_event_quic.c >>> @@ -303,6 +303,7 @@ ngx_quic_new_connection(ngx_connection_t >>> ctp->active_connection_id_limit = 2; >>> >>> ngx_queue_init(&qc->streams.uninitialized); >>> + ngx_queue_init(&qc->streams.free); >>> >>> qc->streams.recv_max_data = qc->tp.initial_max_data; >>> qc->streams.recv_window = qc->streams.recv_max_data; >>> diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h >>> --- a/src/event/quic/ngx_event_quic.h >>> +++ b/src/event/quic/ngx_event_quic.h >>> @@ -78,12 +78,14 @@ struct ngx_quic_stream_s { >>> uint64_t id; >>> uint64_t acked; >>> uint64_t send_max_data; >>> + uint64_t send_offset; >>> + uint64_t send_final_size; >>> uint64_t recv_max_data; >>> uint64_t recv_offset; >>> uint64_t recv_window; >>> uint64_t recv_last; >>> uint64_t recv_size; >>> - uint64_t final_size; >>> + uint64_t recv_final_size; >>> ngx_chain_t *in; >>> ngx_chain_t *out; >>> ngx_uint_t cancelable; /* unsigned cancelable:1; */ >>> diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h >>> --- a/src/event/quic/ngx_event_quic_connection.h >>> +++ b/src/event/quic/ngx_event_quic_connection.h >>> @@ -114,13 +114,16 @@ struct ngx_quic_socket_s { >>> typedef struct { >>> ngx_rbtree_t tree; >>> ngx_rbtree_node_t sentinel; >>> + >>> ngx_queue_t uninitialized; >>> + ngx_queue_t free; >>> >>> uint64_t sent; >>> uint64_t recv_offset; >>> uint64_t recv_window; >>> uint64_t recv_last; >>> uint64_t recv_max_data; >>> + uint64_t send_offset; 
>>> uint64_t send_max_data; >>> >>> uint64_t server_max_streams_uni; >>> diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c >>> --- a/src/event/quic/ngx_event_quic_frames.c >>> +++ b/src/event/quic/ngx_event_quic_frames.c >>> @@ -391,6 +391,10 @@ ngx_quic_split_frame(ngx_connection_t *c >>> return NGX_ERROR; >>> } >>> >>> + if (f->type == NGX_QUIC_FT_STREAM) { >>> + f->u.stream.fin = 0; >>> + } >>> + >>> ngx_queue_insert_after(&f->queue, &nf->queue); >>> >>> return NGX_OK; >>> diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c >>> --- a/src/event/quic/ngx_event_quic_streams.c >>> +++ b/src/event/quic/ngx_event_quic_streams.c >>> @@ -13,6 +13,8 @@ >>> #define NGX_QUIC_STREAM_GONE (void *) -1 >>> >>> >>> +static ngx_int_t ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, >>> + ngx_uint_t err); >>> static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); >>> static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); >>> static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); >>> @@ -28,11 +30,12 @@ static ssize_t ngx_quic_stream_send(ngx_ >>> size_t size); >>> static ngx_chain_t *ngx_quic_stream_send_chain(ngx_connection_t *c, >>> ngx_chain_t *in, off_t limit); >>> -static size_t ngx_quic_max_stream_flow(ngx_connection_t *c); >>> +static ngx_int_t ngx_quic_stream_flush(ngx_quic_stream_t *qs); >>> static void ngx_quic_stream_cleanup_handler(void *data); >>> -static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); >>> -static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); >>> -static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); >>> +static ngx_int_t ngx_quic_close_stream(ngx_quic_stream_t *qs); >>> +static ngx_int_t ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last); >>> +static ngx_int_t ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last); >>> +static ngx_int_t 
ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs); >>> static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); >>> static void ngx_quic_set_event(ngx_event_t *ev); >>> >>> @@ -186,15 +189,20 @@ ngx_quic_close_streams(ngx_connection_t >>> ns = 0; >>> #endif >>> >>> - for (node = ngx_rbtree_min(tree->root, tree->sentinel); >>> - node; >>> - node = ngx_rbtree_next(tree, node)) >>> - { >>> + node = ngx_rbtree_min(tree->root, tree->sentinel); >>> + >>> + while (node) { >>> qs = (ngx_quic_stream_t *) node; >>> + node = ngx_rbtree_next(tree, node); >>> >>> qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; >>> qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; >>> >>> + if (qs->connection == NULL) { >>> + ngx_quic_close_stream(qs); >>> + continue; >>> + } >>> + >>> ngx_quic_set_event(qs->connection->read); >>> ngx_quic_set_event(qs->connection->write); >>> >>> @@ -213,13 +221,17 @@ ngx_quic_close_streams(ngx_connection_t >>> ngx_int_t >>> ngx_quic_reset_stream(ngx_connection_t *c, ngx_uint_t err) >>> { >>> + return ngx_quic_do_reset_stream(c->quic, err); >>> +} >>> + >>> + >>> +static ngx_int_t >>> +ngx_quic_do_reset_stream(ngx_quic_stream_t *qs, ngx_uint_t err) >>> +{ >>> ngx_connection_t *pc; >>> ngx_quic_frame_t *frame; >>> - ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> - qs = c->quic; >>> - >>> if (qs->send_state == NGX_QUIC_STREAM_SEND_DATA_RECVD >>> || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_SENT >>> || qs->send_state == NGX_QUIC_STREAM_SEND_RESET_RECVD) >>> @@ -228,10 +240,14 @@ ngx_quic_reset_stream(ngx_connection_t * >>> } >>> >>> qs->send_state = NGX_QUIC_STREAM_SEND_RESET_SENT; >>> + qs->send_final_size = qs->send_offset; >>> >>> pc = qs->parent; >>> qc = ngx_quic_get_connection(pc); >>> >>> + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL reset", qs->id); >>> + >>> frame = ngx_quic_alloc_frame(pc); >>> if (frame == NULL) { >>> return NGX_ERROR; >>> @@ -241,10 +257,13 @@ 
ngx_quic_reset_stream(ngx_connection_t * >>> frame->type = NGX_QUIC_FT_RESET_STREAM; >>> frame->u.reset_stream.id = qs->id; >>> frame->u.reset_stream.error_code = err; >>> - frame->u.reset_stream.final_size = c->sent; >>> + frame->u.reset_stream.final_size = qs->send_offset; >>> >>> ngx_quic_queue_frame(qc, frame); >>> >>> + ngx_quic_free_chain(pc, qs->out); >>> + qs->out = NULL; >>> + >>> return NGX_OK; >>> } >>> >>> @@ -271,10 +290,7 @@ ngx_quic_shutdown_stream(ngx_connection_ >>> static ngx_int_t >>> ngx_quic_shutdown_stream_send(ngx_connection_t *c) >>> { >>> - ngx_connection_t *pc; >>> - ngx_quic_frame_t *frame; >>> - ngx_quic_stream_t *qs; >>> - ngx_quic_connection_t *qc; >>> + ngx_quic_stream_t *qs; >>> >>> qs = c->quic; >>> >>> @@ -284,32 +300,13 @@ ngx_quic_shutdown_stream_send(ngx_connec >>> return NGX_OK; >>> } >>> >>> - qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; >>> - >>> - pc = qs->parent; >>> - qc = ngx_quic_get_connection(pc); >>> + qs->send_state = NGX_QUIC_STREAM_SEND_SEND; >>> + qs->send_final_size = c->sent; >>> >>> - frame = ngx_quic_alloc_frame(pc); >>> - if (frame == NULL) { >>> - return NGX_ERROR; >>> - } >>> - >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, >>> "quic stream id:0x%xL send shutdown", qs->id); >>> >>> - frame->level = ssl_encryption_application; >>> - frame->type = NGX_QUIC_FT_STREAM; >>> - frame->u.stream.off = 1; >>> - frame->u.stream.len = 1; >>> - frame->u.stream.fin = 1; >>> - >>> - frame->u.stream.stream_id = qs->id; >>> - frame->u.stream.offset = c->sent; >>> - frame->u.stream.length = 0; >>> - >>> - ngx_quic_queue_frame(qc, frame); >>> - >>> - return NGX_OK; >>> + return ngx_quic_stream_flush(qs); >>> } >>> >>> >>> @@ -341,7 +338,7 @@ ngx_quic_shutdown_stream_recv(ngx_connec >>> return NGX_ERROR; >>> } >>> >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> "quic stream id:0x%xL recv 
shutdown", qs->id); >>> >>> frame->level = ssl_encryption_application; >>> @@ -591,6 +588,7 @@ ngx_quic_create_stream(ngx_connection_t >>> { >>> ngx_log_t *log; >>> ngx_pool_t *pool; >>> + ngx_queue_t *q; >>> ngx_connection_t *sc; >>> ngx_quic_stream_t *qs; >>> ngx_pool_cleanup_t *cln; >>> @@ -601,25 +599,41 @@ ngx_quic_create_stream(ngx_connection_t >>> >>> qc = ngx_quic_get_connection(c); >>> >>> - pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); >>> - if (pool == NULL) { >>> - return NULL; >>> + if (!ngx_queue_empty(&qc->streams.free)) { >>> + q = ngx_queue_head(&qc->streams.free); >>> + qs = ngx_queue_data(q, ngx_quic_stream_t, queue); >>> + ngx_queue_remove(&qs->queue); >>> + >>> + } else { >>> + /* >>> + * the number of streams is limited by transport >>> + * parameters and application requirements >>> + */ >>> + >>> + qs = ngx_palloc(c->pool, sizeof(ngx_quic_stream_t)); >>> + if (qs == NULL) { >>> + return NULL; >>> + } >>> } >>> >>> - qs = ngx_pcalloc(pool, sizeof(ngx_quic_stream_t)); >>> - if (qs == NULL) { >>> - ngx_destroy_pool(pool); >>> - return NULL; >>> - } >>> + ngx_memzero(qs, sizeof(ngx_quic_stream_t)); >>> >>> qs->node.key = id; >>> qs->parent = c; >>> qs->id = id; >>> - qs->final_size = (uint64_t) -1; >>> + qs->send_final_size = (uint64_t) -1; >>> + qs->recv_final_size = (uint64_t) -1; >>> + >>> + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, c->log); >>> + if (pool == NULL) { >>> + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); >>> + return NULL; >>> + } >>> >>> log = ngx_palloc(pool, sizeof(ngx_log_t)); >>> if (log == NULL) { >>> ngx_destroy_pool(pool); >>> + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); >>> return NULL; >>> } >>> >>> @@ -629,6 +643,7 @@ ngx_quic_create_stream(ngx_connection_t >>> sc = ngx_get_connection(c->fd, log); >>> if (sc == NULL) { >>> ngx_destroy_pool(pool); >>> + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); >>> return NULL; >>> } >>> >>> @@ -697,6 +712,7 @@ 
ngx_quic_create_stream(ngx_connection_t >>> if (cln == NULL) { >>> ngx_close_connection(sc); >>> ngx_destroy_pool(pool); >>> + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); >>> return NULL; >>> } >>> >>> @@ -737,7 +753,7 @@ ngx_quic_stream_recv(ngx_connection_t *c >>> return NGX_ERROR; >>> } >>> >>> - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> "quic stream id:0x%xL recv buf:%uz", qs->id, size); >>> >>> if (size == 0) { >>> @@ -763,7 +779,7 @@ ngx_quic_stream_recv(ngx_connection_t *c >>> rev->ready = 0; >>> >>> if (qs->recv_state == NGX_QUIC_STREAM_RECV_DATA_RECVD >>> - && qs->recv_offset == qs->final_size) >>> + && qs->recv_offset == qs->recv_final_size) >>> { >>> qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_READ; >>> } >>> @@ -781,7 +797,7 @@ ngx_quic_stream_recv(ngx_connection_t *c >>> ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> "quic stream id:0x%xL recv len:%z", qs->id, len); >>> >>> - if (ngx_quic_update_flow(c, qs->recv_offset + len) != NGX_OK) { >>> + if (ngx_quic_update_flow(qs, qs->recv_offset + len) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> >>> @@ -822,9 +838,7 @@ ngx_quic_stream_send_chain(ngx_connectio >>> off_t flow; >>> size_t n; >>> ngx_event_t *wev; >>> - ngx_chain_t *out; >>> ngx_connection_t *pc; >>> - ngx_quic_frame_t *frame; >>> ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> @@ -842,7 +856,8 @@ ngx_quic_stream_send_chain(ngx_connectio >>> >>> qs->send_state = NGX_QUIC_STREAM_SEND_SEND; >>> >>> - flow = ngx_quic_max_stream_flow(c); >>> + flow = qs->acked + qc->conf->stream_buffer_size - c->sent; >>> + >>> if (flow == 0) { >>> wev->ready = 0; >>> return in; >>> @@ -852,37 +867,15 @@ ngx_quic_stream_send_chain(ngx_connectio >>> limit = flow; >>> } >>> >>> - in = ngx_quic_write_chain(pc, &qs->out, in, limit, 0, &n); >>> + in = ngx_quic_write_chain(pc, &qs->out, in, limit, >>> + c->sent - qs->send_offset, &n); >>> if (in == NGX_CHAIN_ERROR) { >>> return 
NGX_CHAIN_ERROR; >>> } >>> >>> - out = ngx_quic_read_chain(pc, &qs->out, n); >>> - if (out == NGX_CHAIN_ERROR) { >>> - return NGX_CHAIN_ERROR; >>> - } >>> - >>> - frame = ngx_quic_alloc_frame(pc); >>> - if (frame == NULL) { >>> - return NGX_CHAIN_ERROR; >>> - } >>> - >>> - frame->level = ssl_encryption_application; >>> - frame->type = NGX_QUIC_FT_STREAM; >>> - frame->data = out; >>> - frame->u.stream.off = 1; >>> - frame->u.stream.len = 1; >>> - frame->u.stream.fin = 0; >>> - >>> - frame->u.stream.stream_id = qs->id; >>> - frame->u.stream.offset = c->sent; >>> - frame->u.stream.length = n; >>> - >>> c->sent += n; >>> qc->streams.sent += n; >>> >>> - ngx_quic_queue_frame(qc, frame); >>> - >>> if (in) { >>> wev->ready = 0; >>> } >>> @@ -890,61 +883,96 @@ ngx_quic_stream_send_chain(ngx_connectio >>> ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> "quic send_chain sent:%uz", n); >>> >>> + if (ngx_quic_stream_flush(qs) != NGX_OK) { >>> + return NGX_CHAIN_ERROR; >>> + } >>> + >>> return in; >>> } >>> >>> >>> -static size_t >>> -ngx_quic_max_stream_flow(ngx_connection_t *c) >>> +static ngx_int_t >>> +ngx_quic_stream_flush(ngx_quic_stream_t *qs) >>> { >>> - size_t size; >>> - uint64_t sent, unacked; >>> - ngx_quic_stream_t *qs; >>> + off_t limit; >>> + size_t len; >>> + ngx_uint_t last; >>> + ngx_chain_t *out, *cl; >>> + ngx_quic_frame_t *frame; >>> + ngx_connection_t *pc; >>> ngx_quic_connection_t *qc; >>> >>> - qs = c->quic; >>> - qc = ngx_quic_get_connection(qs->parent); >>> + if (qs->send_state != NGX_QUIC_STREAM_SEND_SEND) { >>> + return NGX_OK; >>> + } >>> >>> - size = qc->conf->stream_buffer_size; >>> - sent = c->sent; >>> - unacked = sent - qs->acked; >>> + pc = qs->parent; >>> + qc = ngx_quic_get_connection(pc); >>> >>> if (qc->streams.send_max_data == 0) { >>> qc->streams.send_max_data = qc->ctp.initial_max_data; >>> } >>> >>> - if (unacked >= size) { >>> - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic send flow hit buffer size"); >>> - return 0; 
>>> + limit = ngx_min(qc->streams.send_max_data - qc->streams.send_offset, >>> + qs->send_max_data - qs->send_offset); >>> + >>> + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL flush limit:%O", qs->id, limit); >>> + >>> + out = ngx_quic_read_chain(pc, &qs->out, limit); >>> + if (out == NGX_CHAIN_ERROR) { >>> + return NGX_ERROR; >>> } >>> >>> - size -= unacked; >>> + len = 0; >>> + last = 0; >>> + >>> + for (cl = out; cl; cl = cl->next) { >>> + len += cl->buf->last - cl->buf->pos; >>> + } >>> >>> - if (qc->streams.sent >= qc->streams.send_max_data) { >>> - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic send flow hit MAX_DATA"); >>> - return 0; >>> + if (qs->send_final_size != (uint64_t) -1 >>> + && qs->send_final_size == qs->send_offset + len) >>> + { >>> + qs->send_state = NGX_QUIC_STREAM_SEND_DATA_SENT; >>> + last = 1; >>> + } >>> + >>> + if (len == 0 && !last) { >>> + return NGX_OK; >>> } >>> >>> - if (qc->streams.sent + size > qc->streams.send_max_data) { >>> - size = qc->streams.send_max_data - qc->streams.sent; >>> + frame = ngx_quic_alloc_frame(pc); >>> + if (frame == NULL) { >>> + return NGX_ERROR; >>> } >>> >>> - if (sent >= qs->send_max_data) { >>> - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic send flow hit MAX_STREAM_DATA"); >>> - return 0; >>> + frame->level = ssl_encryption_application; >>> + frame->type = NGX_QUIC_FT_STREAM; >>> + frame->data = out; >>> + >>> + frame->u.stream.off = 1; >>> + frame->u.stream.len = 1; >>> + frame->u.stream.fin = last; >>> + >>> + frame->u.stream.stream_id = qs->id; >>> + frame->u.stream.offset = qs->send_offset; >>> + frame->u.stream.length = len; >>> + >>> + ngx_quic_queue_frame(qc, frame); >>> + >>> + qs->send_offset += len; >>> + qc->streams.send_offset += len; >>> + >>> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL flush len:%uz last:%ui", >>> + qs->id, len, last); >>> + >>> + if (qs->connection == NULL) { >>> + return 
ngx_quic_close_stream(qs); >>> } >>> >>> - if (sent + size > qs->send_max_data) { >>> - size = qs->send_max_data - sent; >>> - } >>> - >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic send flow:%uz", size); >>> - >>> - return size; >>> + return NGX_OK; >>> } >>> >>> >>> @@ -953,40 +981,67 @@ ngx_quic_stream_cleanup_handler(void *da >>> { >>> ngx_connection_t *c = data; >>> >>> + ngx_quic_stream_t *qs; >>> + >>> + qs = c->quic; >>> + >>> + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, qs->parent->log, 0, >>> + "quic stream id:0x%xL cleanup", qs->id); >>> + >>> + if (ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN) != NGX_OK) { >>> + ngx_quic_close_connection(c, NGX_ERROR); >>> + return; >>> + } >>> + >>> + qs->connection = NULL; >>> + >>> + if (ngx_quic_close_stream(qs) != NGX_OK) { >>> + ngx_quic_close_connection(c, NGX_ERROR); >>> + return; >>> + } >>> +} >>> + >>> + >>> +static ngx_int_t >>> +ngx_quic_close_stream(ngx_quic_stream_t *qs) >>> +{ >>> ngx_connection_t *pc; >>> ngx_quic_frame_t *frame; >>> - ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> - qs = c->quic; >>> pc = qs->parent; >>> qc = ngx_quic_get_connection(pc); >>> >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic stream id:0x%xL cleanup", qs->id); >>> + if (!qc->closing) { >>> + /* make sure everything is sent and final size is received */ >>> + >>> + if (qs->recv_state == NGX_QUIC_STREAM_RECV_RECV >>> + || qs->send_state == NGX_QUIC_STREAM_SEND_READY >>> + || qs->send_state == NGX_QUIC_STREAM_SEND_SEND) >>> + { >>> + return NGX_OK; >>> + } >>> + } >>> + >>> + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL close", qs->id); >>> + >>> + ngx_quic_free_chain(pc, qs->in); >>> + ngx_quic_free_chain(pc, qs->out); >>> >>> ngx_rbtree_delete(&qc->streams.tree, &qs->node); >>> - ngx_quic_free_chain(pc, qs->in); >>> - ngx_quic_free_chain(pc, qs->out); >>> + ngx_queue_insert_tail(&qc->streams.free, &qs->queue); >>> >>> if (qc->closing) { >>> /* 
schedule handler call to continue ngx_quic_close_connection() */ >>> ngx_post_event(pc->read, &ngx_posted_events); >>> - return; >>> + return NGX_OK; >>> } >>> >>> - if (qc->error) { >>> - goto done; >>> - } >>> - >>> - (void) ngx_quic_shutdown_stream(c, NGX_RDWR_SHUTDOWN); >>> - >>> - (void) ngx_quic_update_flow(c, qs->recv_last); >>> - >>> if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { >>> frame = ngx_quic_alloc_frame(pc); >>> if (frame == NULL) { >>> - goto done; >>> + return NGX_ERROR; >>> } >>> >>> frame->level = ssl_encryption_application; >>> @@ -1004,13 +1059,11 @@ ngx_quic_stream_cleanup_handler(void *da >>> ngx_quic_queue_frame(qc, frame); >>> } >>> >>> -done: >>> - >>> - (void) ngx_quic_output(pc); >>> - >>> if (qc->shutdown) { >>> ngx_post_event(pc->read, &ngx_posted_events); >>> } >>> + >>> + return NGX_OK; >>> } >>> >>> >>> @@ -1020,7 +1073,6 @@ ngx_quic_handle_stream_frame(ngx_connect >>> { >>> size_t size; >>> uint64_t last; >>> - ngx_connection_t *sc; >>> ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> ngx_quic_stream_frame_t *f; >>> @@ -1048,19 +1100,17 @@ ngx_quic_handle_stream_frame(ngx_connect >>> return NGX_OK; >>> } >>> >>> - sc = qs->connection; >>> - >>> if (qs->recv_state != NGX_QUIC_STREAM_RECV_RECV >>> && qs->recv_state != NGX_QUIC_STREAM_RECV_SIZE_KNOWN) >>> { >>> return NGX_OK; >>> } >>> >>> - if (ngx_quic_control_flow(sc, last) != NGX_OK) { >>> + if (ngx_quic_control_flow(qs, last) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> >>> - if (qs->final_size != (uint64_t) -1 && last > qs->final_size) { >>> + if (qs->recv_final_size != (uint64_t) -1 && last > qs->recv_final_size) { >>> qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; >>> return NGX_ERROR; >>> } >>> @@ -1075,7 +1125,8 @@ ngx_quic_handle_stream_frame(ngx_connect >>> } >>> >>> if (f->fin) { >>> - if (qs->final_size != (uint64_t) -1 && qs->final_size != last) { >>> + if (qs->recv_final_size != (uint64_t) -1 && qs->recv_final_size != last) >>> + { >>> qc->error = 
NGX_QUIC_ERR_FINAL_SIZE_ERROR; >>> return NGX_ERROR; >>> } >>> @@ -1085,7 +1136,7 @@ ngx_quic_handle_stream_frame(ngx_connect >>> return NGX_ERROR; >>> } >>> >>> - qs->final_size = last; >>> + qs->recv_final_size = last; >>> qs->recv_state = NGX_QUIC_STREAM_RECV_SIZE_KNOWN; >>> } >>> >>> @@ -1099,13 +1150,17 @@ ngx_quic_handle_stream_frame(ngx_connect >>> qs->recv_size += size; >>> >>> if (qs->recv_state == NGX_QUIC_STREAM_RECV_SIZE_KNOWN >>> - && qs->recv_size == qs->final_size) >>> + && qs->recv_size == qs->recv_final_size) >>> { >>> qs->recv_state = NGX_QUIC_STREAM_RECV_DATA_RECVD; >>> } >>> >>> + if (qs->connection == NULL) { >>> + return ngx_quic_close_stream(qs); >>> + } >>> + >>> if (f->offset == qs->recv_offset) { >>> - ngx_quic_set_event(sc->read); >>> + ngx_quic_set_event(qs->connection->read); >>> } >>> >>> return NGX_OK; >>> @@ -1128,20 +1183,26 @@ ngx_quic_handle_max_data_frame(ngx_conne >>> return NGX_OK; >>> } >>> >>> - if (tree->root != tree->sentinel >>> - && qc->streams.sent >= qc->streams.send_max_data) >>> + if (tree->root == tree->sentinel >>> + || qc->streams.send_offset < qc->streams.send_max_data) >>> { >>> - >>> - for (node = ngx_rbtree_min(tree->root, tree->sentinel); >>> - node; >>> - node = ngx_rbtree_next(tree, node)) >>> - { >>> - qs = (ngx_quic_stream_t *) node; >>> - ngx_quic_set_event(qs->connection->write); >>> - } >>> + /* not blocked on MAX_DATA */ >>> + qc->streams.send_max_data = f->max_data; >>> + return NGX_OK; >>> } >>> >>> qc->streams.send_max_data = f->max_data; >>> + node = ngx_rbtree_min(tree->root, tree->sentinel); >>> + >>> + while (node && qc->streams.send_offset < qc->streams.send_max_data) { >>> + >>> + qs = (ngx_quic_stream_t *) node; >>> + node = ngx_rbtree_next(tree, node); >>> + >>> + if (ngx_quic_stream_flush(qs) != NGX_OK) { >>> + return NGX_ERROR; >>> + } >>> + } >>> >>> return NGX_OK; >>> } >>> @@ -1189,7 +1250,7 @@ ngx_quic_handle_stream_data_blocked_fram >>> return NGX_OK; >>> } >>> >>> - return 
ngx_quic_update_max_stream_data(qs->connection); >>> + return ngx_quic_update_max_stream_data(qs); >>> } >>> >>> >>> @@ -1197,7 +1258,6 @@ ngx_int_t >>> ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, >>> ngx_quic_header_t *pkt, ngx_quic_max_stream_data_frame_t *f) >>> { >>> - uint64_t sent; >>> ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> @@ -1224,15 +1284,15 @@ ngx_quic_handle_max_stream_data_frame(ng >>> return NGX_OK; >>> } >>> >>> - sent = qs->connection->sent; >>> - >>> - if (sent >= qs->send_max_data) { >>> - ngx_quic_set_event(qs->connection->write); >>> + if (qs->send_offset < qs->send_max_data) { >>> + /* not blocked on MAX_STREAM_DATA */ >>> + qs->send_max_data = f->limit; >>> + return NGX_OK; >>> } >>> >>> qs->send_max_data = f->limit; >>> >>> - return NGX_OK; >>> + return ngx_quic_stream_flush(qs); >>> } >>> >>> >>> @@ -1240,7 +1300,6 @@ ngx_int_t >>> ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, >>> ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) >>> { >>> - ngx_connection_t *sc; >>> ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> @@ -1271,13 +1330,13 @@ ngx_quic_handle_reset_stream_frame(ngx_c >>> >>> qs->recv_state = NGX_QUIC_STREAM_RECV_RESET_RECVD; >>> >>> - sc = qs->connection; >>> - >>> - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { >>> + if (ngx_quic_control_flow(qs, f->final_size) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> >>> - if (qs->final_size != (uint64_t) -1 && qs->final_size != f->final_size) { >>> + if (qs->recv_final_size != (uint64_t) -1 >>> + && qs->recv_final_size != f->final_size) >>> + { >>> qc->error = NGX_QUIC_ERR_FINAL_SIZE_ERROR; >>> return NGX_ERROR; >>> } >>> @@ -1287,12 +1346,16 @@ ngx_quic_handle_reset_stream_frame(ngx_c >>> return NGX_ERROR; >>> } >>> >>> - qs->final_size = f->final_size; >>> + qs->recv_final_size = f->final_size; >>> >>> - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { >>> + if (ngx_quic_update_flow(qs, 
qs->recv_final_size) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> >>> + if (qs->connection == NULL) { >>> + return ngx_quic_close_stream(qs); >>> + } >>> + >>> ngx_quic_set_event(qs->connection->read); >>> >>> return NGX_OK; >>> @@ -1325,10 +1388,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c >>> return NGX_OK; >>> } >>> >>> - if (ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { >>> + if (ngx_quic_do_reset_stream(qs, f->error_code) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> >>> + if (qs->connection == NULL) { >>> + return ngx_quic_close_stream(qs); >>> + } >>> + >>> ngx_quic_set_event(qs->connection->write); >>> >>> return NGX_OK; >>> @@ -1378,30 +1445,37 @@ ngx_quic_handle_stream_ack(ngx_connectio >>> return; >>> } >>> >>> + if (qs->connection == NULL) { >>> + qs->acked += f->u.stream.length; >>> + return; >>> + } >>> + >>> sent = qs->connection->sent; >>> unacked = sent - qs->acked; >>> + qs->acked += f->u.stream.length; >>> >>> - if (unacked >= qc->conf->stream_buffer_size) { >>> - ngx_quic_set_event(qs->connection->write); >>> + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> + "quic stream id:0x%xL ack len:%uL acked:%uL unacked:%uL", >>> + qs->id, f->u.stream.length, qs->acked, sent - qs->acked); >>> + >>> + if (unacked != qc->conf->stream_buffer_size) { >>> + /* not blocked on buffer size */ >>> + return; >>> } >>> >>> - qs->acked += f->u.stream.length; >>> - >>> - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, >>> - "quic stream ack len:%uL acked:%uL unacked:%uL", >>> - f->u.stream.length, qs->acked, sent - qs->acked); >>> + ngx_quic_set_event(qs->connection->write); >>> } >>> >>> >>> static ngx_int_t >>> -ngx_quic_control_flow(ngx_connection_t *c, uint64_t last) >>> +ngx_quic_control_flow(ngx_quic_stream_t *qs, uint64_t last) >>> { >>> uint64_t len; >>> - ngx_quic_stream_t *qs; >>> + ngx_connection_t *pc; >>> ngx_quic_connection_t *qc; >>> >>> - qs = c->quic; >>> - qc = ngx_quic_get_connection(qs->parent); >>> + pc = 
qs->parent; >>> + qc = ngx_quic_get_connection(pc); >>> >>> if (last <= qs->recv_last) { >>> return NGX_OK; >>> @@ -1409,9 +1483,9 @@ ngx_quic_control_flow(ngx_connection_t * >>> >>> len = last - qs->recv_last; >>> >>> - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic flow control msd:%uL/%uL md:%uL/%uL", >>> - last, qs->recv_max_data, qc->streams.recv_last + len, >>> + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL flow control msd:%uL/%uL md:%uL/%uL", >>> + qs->id, last, qs->recv_max_data, qc->streams.recv_last + len, >>> qc->streams.recv_max_data); >>> >>> qs->recv_last += len; >>> @@ -1435,14 +1509,12 @@ ngx_quic_control_flow(ngx_connection_t * >>> >>> >>> static ngx_int_t >>> -ngx_quic_update_flow(ngx_connection_t *c, uint64_t last) >>> +ngx_quic_update_flow(ngx_quic_stream_t *qs, uint64_t last) >>> { >>> uint64_t len; >>> ngx_connection_t *pc; >>> - ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> - qs = c->quic; >>> pc = qs->parent; >>> qc = ngx_quic_get_connection(pc); >>> >>> @@ -1452,13 +1524,13 @@ ngx_quic_update_flow(ngx_connection_t *c >>> >>> len = last - qs->recv_offset; >>> >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic flow update %uL", last); >>> + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL flow update %uL", qs->id, last); >>> >>> qs->recv_offset += len; >>> >>> if (qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { >>> - if (ngx_quic_update_max_stream_data(c) != NGX_OK) { >>> + if (ngx_quic_update_max_stream_data(qs) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> } >>> @@ -1478,15 +1550,13 @@ ngx_quic_update_flow(ngx_connection_t *c >>> >>> >>> static ngx_int_t >>> -ngx_quic_update_max_stream_data(ngx_connection_t *c) >>> +ngx_quic_update_max_stream_data(ngx_quic_stream_t *qs) >>> { >>> uint64_t recv_max_data; >>> ngx_connection_t *pc; >>> ngx_quic_frame_t *frame; >>> - ngx_quic_stream_t *qs; >>> ngx_quic_connection_t *qc; >>> >>> - qs 
= c->quic; >>> pc = qs->parent; >>> qc = ngx_quic_get_connection(pc); >>> >>> @@ -1502,8 +1572,9 @@ ngx_quic_update_max_stream_data(ngx_conn >>> >>> qs->recv_max_data = recv_max_data; >>> >>> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> - "quic flow update msd:%uL", qs->recv_max_data); >>> + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, >>> + "quic stream id:0x%xL flow update msd:%uL", >>> + qs->id, qs->recv_max_data); >>> >>> frame = ngx_quic_alloc_frame(pc); >>> if (frame == NULL) { >>> diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c >>> --- a/src/http/v3/ngx_http_v3_uni.c >>> +++ b/src/http/v3/ngx_http_v3_uni.c >>> @@ -295,8 +295,6 @@ ngx_http_v3_uni_dummy_write_handler(ngx_ >>> } >>> >>> >>> -/* XXX async & buffered stream writes */ >>> - >>> ngx_connection_t * >>> ngx_http_v3_create_push_stream(ngx_connection_t *c, uint64_t push_id) >>> { >> >>> _______________________________________________ >>> nginx-devel mailing list -- nginx-devel at nginx.org >>> To unsubscribe send an email to nginx-devel-leave at nginx.org >> >> _______________________________________________ >> nginx-devel mailing list -- nginx-devel at nginx.org >> To unsubscribe send an email to nginx-devel-leave at nginx.org > From vl at nginx.com Tue Feb 8 12:42:54 2022 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 8 Feb 2022 15:42:54 +0300 Subject: [QUIC] padding of Initial packets In-Reply-To: References: Message-ID: On Tue, Feb 08, 2022 at 02:10:04PM +0300, Andrey Kolyshkin wrote: > Hello. > > This patch is strange. > 1. ngx_quic_revert_send can set to ctx an uninitialized value from > preserved_pnum. (example if min > len and i = 0, only 0 element is filled > in preserved_pnum but restored all) > 2. ngx_quic_revert_send will restored pnum for ctx that have already called > ngx_quic_output_packet and the packet with this pnum will be queued. > (example if min > len and i = 1) thank you for noticing. indeed, this needs to be fixed. 
we don't want to restore contexts we didn't yet touch. > > > On Wed, Feb 2, 2022 at 2:07 PM Sergey Kandaurov wrote: > > > > > > On 2 Feb 2022, at 13:55, Vladimir Homutov wrote: > > > > > > # HG changeset patch > > > # User Vladimir Homutov > > > # Date 1643796973 -10800 > > > # Wed Feb 02 13:16:13 2022 +0300 > > > # Branch quic > > > # Node ID fbfbcf66990e8964bcf308f3869f37d1a1acceeb > > > # Parent 8c6645ecaeb6cbf27976fd9035440bfcab943117 > > > QUIC: fixed padding of initial packets in case of limited path. > > > > > > Previously, non-padded initial packet could be sent as a result of the > > > following situation: > > > > > > - initial queue is not empty (so padding to 1200 is required) > > > - handhsake queue is not empty (so padding is to be added after h/s > > packet) > > > > handshake > > > > > - path is limited > > > > > > If serializing handshake packet would violate path limit, such packet was > > > omitted, and the non-padded initial packet was sent. > > > > > > The fix is to avoid sending the packet at all in such case. This > > follows the > > > original intention introduced in c5155a0cb12f. > > > > > > diff --git a/src/event/quic/ngx_event_quic_output.c > > b/src/event/quic/ngx_event_quic_output.c > > > --- a/src/event/quic/ngx_event_quic_output.c > > > +++ b/src/event/quic/ngx_event_quic_output.c > > > @@ -158,7 +158,14 @@ ngx_quic_create_datagrams(ngx_connection > > > ? 
NGX_QUIC_MIN_INITIAL_SIZE - (p - dst) : 0; > > > > > > if (min > len) { > > > - continue; > > > + /* padding can't be applied - avoid sending the packet > > */ > > > + > > > + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { > > > + ctx = &qc->send_ctx[i]; > > > + ngx_quic_revert_send(c, ctx, preserved_pnum[i]); > > > > this could be simplified to reduce ctx variable: > > ngx_quic_revert_send(c, &qc->send_ctx[i], preserved_pnum[i]); > > > > but it won't fit into 80 line, so that's good just as well > > > > > + } > > > + > > > + return NGX_OK; > > > } > > > > > > n = ngx_quic_output_packet(c, ctx, p, len, min); > > > > > > > -- > > Sergey Kandaurov > > > > _______________________________________________ > > nginx-devel mailing list -- nginx-devel at nginx.org > > To unsubscribe send an email to nginx-devel-leave at nginx.org > > > > > -- > Best regards, Andrey > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From aep at exys.org Tue Feb 8 12:48:54 2022 From: aep at exys.org (Arvid Picciani) Date: Tue, 8 Feb 2022 13:48:54 +0100 Subject: rfc: ssl_client_pubkey variable Message-ID: Hi, we would like to authenticate clients by their pubkey instead of using a common trust root. Would a patch be acceptable that adds the ssl_client_pubkey to the http_ssl module? i've never touched nginx code, but as far as i understand all it needs is another function in ngx_event_openssl.c that uses X509_get_pubkey and encodes that as hex. /b/ Arvid From gaoyan09 at baidu.com Wed Feb 9 06:51:04 2022 From: gaoyan09 at baidu.com (=?utf-8?B?R2FvLFlhbijlqpLkvZPkupEp?=) Date: Wed, 9 Feb 2022 06:51:04 +0000 Subject: Why use sendmsg in loop instead of sendmmsg Message-ID: <5DBA0522-6E17-4272-AA83-986D787D9AAC@baidu.com> HI ngx_quic_create_datagrams use sendmsg in loop when without gso. Can use sendmmsg directly? 
Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... URL: From gaoyan09 at baidu.com Wed Feb 9 06:51:26 2022 From: gaoyan09 at baidu.com (=?utf-8?B?R2FvLFlhbijlqpLkvZPkupEp?=) Date: Wed, 9 Feb 2022 06:51:26 +0000 Subject: [quic] Why use sendmsg in loop instead of sendmmsg Message-ID: HI ngx_quic_create_datagrams use sendmsg in loop when without gso. Can use sendmmsg directly? Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Wed Feb 9 11:36:31 2022 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 9 Feb 2022 14:36:31 +0300 Subject: [quic] Why use sendmsg in loop instead of sendmmsg In-Reply-To: References: Message-ID: On Wed, Feb 09, 2022 at 06:51:26AM +0000, Gao,Yan(媒体云) wrote: > HI > ngx_quic_create_datagrams use sendmsg in loop when without gso. Can use sendmmsg directly? there are some reasons we don't do it: first, attempt to send multiple packets at once makes the code more complex, especially when you have to deal with multiple encryption levels. Typically, this is an initial stage of the connection (i.e. handshake) and you won't get much performance boost from sending multiple packets at once. That's why we switch to GSO only for application-level packets. second, sendmmsg() (while being useful) still doesn't provide breakthrough performance gain. Probably, it would be beneficial to have sendmmsg() support as well, but currently this is not a top priority. From vl at nginx.com Wed Feb 9 13:34:41 2022 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 9 Feb 2022 16:34:41 +0300 Subject: [QUIC] padding of Initial packets In-Reply-To: References: Message-ID: On Tue, Feb 08, 2022 at 03:42:54PM +0300, Vladimir Homutov wrote: > On Tue, Feb 08, 2022 at 02:10:04PM +0300, Andrey Kolyshkin wrote: > > Hello. > > > > This patch is strange. > > 1. ngx_quic_revert_send can set to ctx an uninitialized value from > > preserved_pnum. 
(example if min > len and i = 0, only 0 element is filled > > in preserved_pnum but restored all) > > 2. ngx_quic_revert_send will restored pnum for ctx that have already called > > ngx_quic_output_packet and the packet with this pnum will be queued. > > (example if min > len and i = 1) > > thank you for noticing. > indeed, this needs to be fixed. we don't want to restore contexts we > didn't yet touch. The suggested fix is below. Also, while investigating the issue thoroughly, we found that it is also possible to run into negative ctx->inflight when discarding context. This is addressed by a second patch. # HG changeset patch # User Vladimir Homutov # Date 1644411201 -10800 # Wed Feb 09 15:53:21 2022 +0300 # Branch quic # Node ID a4fb28741e19af426228e64b8d2c02ed3950b538 # Parent dde5cb0205ef8c2a2a3255e7bd369a9c644f2049 QUIC: fixed output context restoring. The cd8018bc81a5 fixed unintended send of non-padded initial packets, but failed to restore context properly: only processed contexts need to be restored. As a consequence, a packet number could be restored from uninitialized value. diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -165,7 +165,7 @@ ngx_quic_create_datagrams(ngx_connection if (min > len) { /* padding can't be applied - avoid sending the packet */ - for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { + while (i-- > 0) { ctx = &qc->send_ctx[i]; ngx_quic_revert_send(c, ctx, preserved_pnum[i]); } # HG changeset patch # User Vladimir Homutov # Date 1644411102 -10800 # Wed Feb 09 15:51:42 2022 +0300 # Branch quic # Node ID 2e27c45e2edb2c9540b211040d314b1748865820 # Parent a4fb28741e19af426228e64b8d2c02ed3950b538 QUIC: fixed in-flight bytes accounting. Initially, frames are genereated and stored in ctx->frames. Next, ngx_quic_output() collects frames to be sent in in ctx->sending. 
On failure, ngx_quic_revert_send() returns frames into ctx->frames.
On success, the ngx_quic_commit_send() moves ack-eliciting frames into
ctx->sent and frees non-ack-eliciting frames. This function also updates
in-flight bytes counter, so only actually sent frames are accounted.

The counter is decremented in the following cases:
- acknowledgment is received
- packet was declared lost
- we are discarding context completely

In each of these cases the frame is removed from the ctx->sent queue and
the in-flight counter is decremented accordingly.
Previously it was reported as SSL_shutdown() error of SSL_ERROR_SYSCALL. diffstat: src/event/ngx_event_openssl.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (16 lines): diff -r 1add55d23652 -r a736a7a613ea src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Fri Feb 04 13:29:31 2022 +0300 +++ b/src/event/ngx_event_openssl.c Tue Feb 08 17:35:27 2022 +0300 @@ -3385,6 +3385,12 @@ ngx_ssl_connection_error(ngx_connection_ #endif || n == SSL_R_WRONG_VERSION_NUMBER /* 267 */ || n == SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC /* 281 */ +#ifdef SSL_R_APPLICATION_DATA_AFTER_CLOSE_NOTIFY + || n == SSL_R_APPLICATION_DATA_AFTER_CLOSE_NOTIFY /* 291 */ +#endif +#ifdef SSL_R_APPLICATION_DATA_ON_SHUTDOWN + || n == SSL_R_APPLICATION_DATA_ON_SHUTDOWN /* 291 */ +#endif #ifdef SSL_R_RENEGOTIATE_EXT_TOO_LONG || n == SSL_R_RENEGOTIATE_EXT_TOO_LONG /* 335 */ || n == SSL_R_RENEGOTIATION_ENCODING_ERR /* 336 */ From xeioex at nginx.com Mon Feb 14 14:28:27 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 14 Feb 2022 14:28:27 +0000 Subject: [njs] Fixed backtraces while traversing imported user modules. Message-ID: details: https://hg.nginx.org/njs/rev/bede4b8a693a branches: changeset: 1823:bede4b8a693a user: Dmitry Volyntsev date: Mon Feb 14 14:10:04 2022 +0000 description: Fixed backtraces while traversing imported user modules. Previously, njs_builtin_match_native_function(), which is used to build a backtrace for an exception, assumed that user modules always return object values, which is not the case. As a result, njs_object_traverse() may receive incorrect pointer. This fix is to only traverse object values. 
diffstat: src/njs_builtin.c | 12 +++++++----- test/js/import_native_module_exception.t.js | 12 ++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diffs (40 lines): diff -r 4d38ea471228 -r bede4b8a693a src/njs_builtin.c --- a/src/njs_builtin.c Thu Jan 27 13:01:55 2022 +0000 +++ b/src/njs_builtin.c Mon Feb 14 14:10:04 2022 +0000 @@ -761,13 +761,15 @@ njs_builtin_match_native_function(njs_vm break; } - ctx.match = module->name; + if (njs_is_object(&module->value)) { + ctx.match = module->name; - ret = njs_object_traverse(vm, njs_object(&module->value), &ctx, - njs_builtin_traverse); + ret = njs_object_traverse(vm, njs_object(&module->value), &ctx, + njs_builtin_traverse); - if (ret == NJS_DONE) { - goto found; + if (ret == NJS_DONE) { + goto found; + } } } diff -r 4d38ea471228 -r bede4b8a693a test/js/import_native_module_exception.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_native_module_exception.t.js Mon Feb 14 14:10:04 2022 +0000 @@ -0,0 +1,12 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import fs from 'fs'; +import lib from 'lib3.js'; + +fs.readFileSync({}.a.a); From xeioex at nginx.com Mon Feb 14 14:28:29 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 14 Feb 2022 14:28:29 +0000 Subject: [njs] Fixed backtraces for native modules imported with import statement. Message-ID: details: https://hg.nginx.org/njs/rev/040d1b318a15 branches: changeset: 1824:040d1b318a15 user: Dmitry Volyntsev date: Mon Feb 14 14:10:26 2022 +0000 description: Fixed backtraces for native modules imported with import statement. Previously, the module name was missing when exception is reported for a native module function imported with import statement. 
diffstat: external/njs_crypto_module.c | 8 ++++++++ external/njs_fs_module.c | 8 ++++++++ external/njs_query_string_module.c | 8 ++++++++ src/test/njs_unit_test.c | 5 +++++ 4 files changed, 29 insertions(+), 0 deletions(-) diffs (69 lines): diff -r bede4b8a693a -r 040d1b318a15 external/njs_crypto_module.c --- a/external/njs_crypto_module.c Mon Feb 14 14:10:04 2022 +0000 +++ b/external/njs_crypto_module.c Mon Feb 14 14:10:26 2022 +0000 @@ -231,6 +231,14 @@ static njs_external_t njs_ext_crypto_hm static njs_external_t njs_ext_crypto_crypto[] = { { + .flags = NJS_EXTERN_PROPERTY | NJS_EXTERN_SYMBOL, + .name.symbol = NJS_SYMBOL_TO_STRING_TAG, + .u.property = { + .value = "crypto", + } + }, + + { .flags = NJS_EXTERN_METHOD, .name.string = njs_str("createHash"), .writable = 1, diff -r bede4b8a693a -r 040d1b318a15 external/njs_fs_module.c --- a/external/njs_fs_module.c Mon Feb 14 14:10:04 2022 +0000 +++ b/external/njs_fs_module.c Mon Feb 14 14:10:26 2022 +0000 @@ -228,6 +228,14 @@ static njs_fs_entry_t njs_flags_table[] static njs_external_t njs_ext_fs[] = { { + .flags = NJS_EXTERN_PROPERTY | NJS_EXTERN_SYMBOL, + .name.symbol = NJS_SYMBOL_TO_STRING_TAG, + .u.property = { + .value = "fs", + } + }, + + { .flags = NJS_EXTERN_METHOD, .name.string = njs_str("access"), .writable = 1, diff -r bede4b8a693a -r 040d1b318a15 external/njs_query_string_module.c --- a/external/njs_query_string_module.c Mon Feb 14 14:10:04 2022 +0000 +++ b/external/njs_query_string_module.c Mon Feb 14 14:10:26 2022 +0000 @@ -33,6 +33,14 @@ static njs_int_t njs_query_string_init(n static njs_external_t njs_ext_query_string[] = { { + .flags = NJS_EXTERN_PROPERTY | NJS_EXTERN_SYMBOL, + .name.symbol = NJS_SYMBOL_TO_STRING_TAG, + .u.property = { + .value = "querystring", + } + }, + + { .flags = NJS_EXTERN_METHOD, .name.string = njs_str("parse"), .writable = 1, diff -r bede4b8a693a -r 040d1b318a15 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Feb 14 14:10:04 2022 +0000 +++ 
b/src/test/njs_unit_test.c Mon Feb 14 14:10:26 2022 +0000 @@ -21243,6 +21243,11 @@ static njs_unit_test_t njs_shared_test[ " at fs.readFileSync (native)\n" " at main (:1)\n") }, + { njs_str("import fs from 'fs'; fs.readFileSync()"), + njs_str("TypeError: \"path\" must be a string or Buffer\n" + " at fs.readFileSync (native)\n" + " at main (:1)\n") }, + { njs_str("var f = new Function('return 1;'); f();"), njs_str("1") }, From xeioex at nginx.com Mon Feb 14 14:28:31 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 14 Feb 2022 14:28:31 +0000 Subject: [njs] Fixed Function constructor in CLI. Message-ID: details: https://hg.nginx.org/njs/rev/4e045c33a32e branches: changeset: 1825:4e045c33a32e user: Dmitry Volyntsev date: Mon Feb 14 14:10:47 2022 +0000 description: Fixed Function constructor in CLI. Previously, Function constructor exported its local variables to vm->variables_hash. vm->variables_hash is used in njs CLI to query global variables during console input completion. The exporting is incorrect because it pollutes the global scope. 
diffstat: src/njs_function.c | 11 ++--------- src/njs_variable.c | 29 ----------------------------- src/njs_variable.h | 2 -- 3 files changed, 2 insertions(+), 40 deletions(-) diffs (85 lines): diff -r 040d1b318a15 -r 4e045c33a32e src/njs_function.c --- a/src/njs_function.c Mon Feb 14 14:10:26 2022 +0000 +++ b/src/njs_function.c Mon Feb 14 14:10:47 2022 +0000 @@ -1109,7 +1109,6 @@ njs_function_constructor(njs_vm_t *vm, n njs_function_t *function; njs_generator_t generator; njs_parser_node_t *node; - njs_parser_scope_t *scope; njs_function_lambda_t *lambda; const njs_token_type_t *type; @@ -1215,20 +1214,14 @@ njs_function_constructor(njs_vm_t *vm, n } } - scope = parser.scope; - - ret = njs_variables_copy(vm, &scope->variables, vm->variables_hash); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - ret = njs_generator_init(&generator, 0, 1); if (njs_slow_path(ret != NJS_OK)) { njs_internal_error(vm, "njs_generator_init() failed"); return NJS_ERROR; } - code = njs_generate_scope(vm, &generator, scope, &njs_entry_anonymous); + code = njs_generate_scope(vm, &generator, parser.scope, + &njs_entry_anonymous); if (njs_slow_path(code == NULL)) { if (!njs_is_error(&vm->retval)) { njs_internal_error(vm, "njs_generate_scope() failed"); diff -r 040d1b318a15 -r 4e045c33a32e src/njs_variable.c --- a/src/njs_variable.c Mon Feb 14 14:10:26 2022 +0000 +++ b/src/njs_variable.c Mon Feb 14 14:10:47 2022 +0000 @@ -106,35 +106,6 @@ njs_variable_scope_function_add(njs_pars } - -njs_int_t -njs_variables_copy(njs_vm_t *vm, njs_rbtree_t *variables, - njs_rbtree_t *prev_variables) -{ - njs_rbtree_node_t *node; - njs_variable_node_t *var_node; - - node = njs_rbtree_min(prev_variables); - - while (njs_rbtree_is_there_successor(prev_variables, node)) { - var_node = (njs_variable_node_t *) node; - - var_node = njs_variable_node_alloc(vm, var_node->variable, - var_node->key); - if (njs_slow_path(var_node == NULL)) { - njs_memory_error(vm); - return NJS_ERROR; - } - - 
njs_rbtree_insert(variables, &var_node->node); - - node = njs_rbtree_node_successor(prev_variables, node); - } - - return NJS_OK; -} - - static njs_parser_scope_t * njs_variable_scope(njs_parser_scope_t *scope, uintptr_t unique_id, njs_variable_t **retvar, njs_variable_type_t type) diff -r 040d1b318a15 -r 4e045c33a32e src/njs_variable.h --- a/src/njs_variable.h Mon Feb 14 14:10:26 2022 +0000 +++ b/src/njs_variable.h Mon Feb 14 14:10:47 2022 +0000 @@ -63,8 +63,6 @@ njs_variable_t *njs_variable_add(njs_par njs_parser_scope_t *scope, uintptr_t unique_id, njs_variable_type_t type); njs_variable_t *njs_variable_function_add(njs_parser_t *parser, njs_parser_scope_t *scope, uintptr_t unique_id, njs_variable_type_t type); -njs_int_t njs_variables_copy(njs_vm_t *vm, njs_rbtree_t *variables, - njs_rbtree_t *prev_variables); njs_variable_t * njs_label_add(njs_vm_t *vm, njs_parser_scope_t *scope, uintptr_t unique_id); njs_variable_t *njs_label_find(njs_vm_t *vm, njs_parser_scope_t *scope, From xeioex at nginx.com Mon Feb 14 14:28:33 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 14 Feb 2022 14:28:33 +0000 Subject: [njs] Introduced njs_parser_init(). Message-ID: details: https://hg.nginx.org/njs/rev/26fd49ea3f72 branches: changeset: 1826:26fd49ea3f72 user: Dmitry Volyntsev date: Mon Feb 14 14:10:59 2022 +0000 description: Introduced njs_parser_init(). 
diffstat: src/njs_function.c | 9 ++------- src/njs_parser.c | 27 +++++++++++++++++++++++++++ src/njs_parser.h | 4 ++++ src/njs_vm.c | 14 ++++---------- 4 files changed, 37 insertions(+), 17 deletions(-) diffs (131 lines): diff -r 4e045c33a32e -r 26fd49ea3f72 src/njs_function.c --- a/src/njs_function.c Mon Feb 14 14:10:47 2022 +0000 +++ b/src/njs_function.c Mon Feb 14 14:10:59 2022 +0000 @@ -1103,7 +1103,6 @@ njs_function_constructor(njs_vm_t *vm, n njs_int_t ret; njs_str_t str, file; njs_uint_t i; - njs_lexer_t lexer; njs_parser_t parser; njs_vm_code_t *code; njs_function_t *function; @@ -1171,16 +1170,12 @@ njs_function_constructor(njs_vm_t *vm, n file = njs_str_value("runtime"); - ret = njs_lexer_init(vm, &lexer, &file, str.start, str.start + str.length, - 1); + ret = njs_parser_init(vm, &parser, NULL, &file, str.start, + str.start + str.length, 1); if (njs_slow_path(ret != NJS_OK)) { return ret; } - njs_memzero(&parser, sizeof(njs_parser_t)); - - parser.lexer = &lexer; - ret = njs_parser(vm, &parser); if (njs_slow_path(ret != NJS_OK)) { return ret; diff -r 4e045c33a32e -r 26fd49ea3f72 src/njs_parser.c --- a/src/njs_parser.c Mon Feb 14 14:10:47 2022 +0000 +++ b/src/njs_parser.c Mon Feb 14 14:10:59 2022 +0000 @@ -519,6 +519,33 @@ njs_parser_reject(njs_parser_t *parser) njs_int_t +njs_parser_init(njs_vm_t *vm, njs_parser_t *parser, njs_parser_scope_t *scope, + njs_str_t *file, u_char *start, u_char *end, njs_uint_t runtime) +{ + njs_lexer_t *lexer; + + njs_memzero(parser, sizeof(njs_parser_t)); + + parser->scope = scope; + + lexer = &parser->lexer0; + parser->lexer = lexer; + + lexer->file = *file; + lexer->start = start; + lexer->end = end; + lexer->line = 1; + lexer->keywords_hash = (runtime) ? 
&vm->keywords_hash + : &vm->shared->keywords_hash; + lexer->mem_pool = vm->mem_pool; + + njs_queue_init(&lexer->preread); + + return NJS_OK; +} + + +njs_int_t njs_parser(njs_vm_t *vm, njs_parser_t *parser) { njs_int_t ret; diff -r 4e045c33a32e -r 26fd49ea3f72 src/njs_parser.h --- a/src/njs_parser.h Mon Feb 14 14:10:47 2022 +0000 +++ b/src/njs_parser.h Mon Feb 14 14:10:59 2022 +0000 @@ -74,6 +74,7 @@ typedef njs_int_t (*njs_parser_state_fun struct njs_parser_s { njs_parser_state_func_t state; njs_queue_t stack; + njs_lexer_t lexer0; njs_lexer_t *lexer; njs_vm_t *vm; njs_parser_node_t *node; @@ -119,6 +120,9 @@ njs_int_t njs_parser_failed_state(njs_pa intptr_t njs_parser_scope_rbtree_compare(njs_rbtree_node_t *node1, njs_rbtree_node_t *node2); +njs_int_t njs_parser_init(njs_vm_t *vm, njs_parser_t *parser, + njs_parser_scope_t *scope, njs_str_t *file, u_char *start, u_char *end, + njs_uint_t runtime); njs_int_t njs_parser(njs_vm_t *vm, njs_parser_t *parser); njs_int_t njs_parser_module_lambda(njs_parser_t *parser, diff -r 4e045c33a32e -r 26fd49ea3f72 src/njs_vm.c --- a/src/njs_vm.c Mon Feb 14 14:10:47 2022 +0000 +++ b/src/njs_vm.c Mon Feb 14 14:10:59 2022 +0000 @@ -143,27 +143,21 @@ njs_vm_compile(njs_vm_t *vm, u_char **st njs_str_t ast; njs_chb_t chain; njs_value_t **global, **new; - njs_lexer_t lexer; njs_parser_t parser; njs_vm_code_t *code; njs_generator_t generator; njs_parser_scope_t *scope; - njs_memzero(&parser, sizeof(njs_parser_t)); - - parser.scope = vm->global_scope; - - if (parser.scope != NULL && vm->modules != NULL) { + if (vm->modules != NULL) { njs_module_reset(vm); } - ret = njs_lexer_init(vm, &lexer, &vm->options.file, *start, end, 0); + ret = njs_parser_init(vm, &parser, vm->global_scope, &vm->options.file, + *start, end, 0); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } - parser.lexer = &lexer; - ret = njs_parser(vm, &parser); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; @@ -186,7 +180,7 @@ njs_vm_compile(njs_vm_t *vm, u_char 
**st njs_mp_free(vm->mem_pool, ast.start); } - *start = lexer.start; + *start = parser.lexer->start; scope = parser.scope; ret = njs_generator_init(&generator, 0, 0); From ru at nginx.com Wed Feb 16 12:30:55 2022 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 16 Feb 2022 15:30:55 +0300 Subject: [PATCH] Add ipv4=off option in resolver like ipv6=off (ticket #1330) In-Reply-To: References: Message-ID: <20220216123055.GA18027@lo0.su> Hi Lukas, On Wed, Jan 19, 2022 at 07:47:44PM +0100, Lukas Lihotzki via nginx-devel wrote: > # HG changeset patch > # User Lukas Lihotzki > # Date 1642618053 -3600 > # Wed Jan 19 19:47:33 2022 +0100 > # Node ID e9f06dc2d6a4a1aa61c15009b84ceedcaf5983b2 > # Parent aeab41dfd2606dd36cabbf01f1472726e27e8aea > Add ipv4=off option in resolver like ipv6=off (ticket #1330). > > IPv6-only hosts (ticket #1330) and upstreams with IPv6 bind address > (ticket #1535) need to disable resolving to IPv4 addresses. > > Ticket #1330 mentions ipv4=off is the proper fix. There's a number of problems in your patch. Please try this one instead: # HG changeset patch # User Ruslan Ermilov # Date 1644873563 -10800 # Tue Feb 15 00:19:23 2022 +0300 # Node ID 5d2cb60a78dd32a10a0010ccff39974fd7605867 # Parent 1add55d236522616ce34ffaa4dc697a76d3d41a4 The "ipv4=" parameter of the "resolver" directive (ticket #2196). When set to "off", only IPv6 addresses will be resolved, and no A queries are ever sent. 
diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -157,6 +157,8 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ cln->handler = ngx_resolver_cleanup; cln->data = r; + r->ipv4 = 1; + ngx_rbtree_init(&r->name_rbtree, &r->name_sentinel, ngx_resolver_rbtree_insert_value); @@ -225,6 +227,23 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } #if (NGX_HAVE_INET6) + if (ngx_strncmp(names[i].data, "ipv4=", 5) == 0) { + + if (ngx_strcmp(&names[i].data[5], "on") == 0) { + r->ipv4 = 1; + + } else if (ngx_strcmp(&names[i].data[5], "off") == 0) { + r->ipv4 = 0; + + } else { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter: %V", &names[i]); + return NULL; + } + + continue; + } + if (ngx_strncmp(names[i].data, "ipv6=", 5) == 0) { if (ngx_strcmp(&names[i].data[5], "on") == 0) { @@ -273,6 +292,14 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } } +#if (NGX_HAVE_INET6) + if (r->ipv4 + r->ipv6 == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "\"ipv4\" and \"ipv6\" cannot both be \"off\""); + return NULL; + } +#endif + if (n && r->connections.nelts == 0) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "no name servers defined"); return NULL; @@ -836,7 +863,7 @@ ngx_resolve_name_locked(ngx_resolver_t * r->last_connection = 0; } - rn->naddrs = (u_short) -1; + rn->naddrs = r->ipv4 ? (u_short) -1 : 0; rn->tcp = 0; #if (NGX_HAVE_INET6) rn->naddrs6 = r->ipv6 ? (u_short) -1 : 0; @@ -1263,7 +1290,7 @@ ngx_resolver_send_query(ngx_resolver_t * rec->log.action = "resolving"; } - if (rn->naddrs == (u_short) -1) { + if (rn->query && rn->naddrs == (u_short) -1) { rc = rn->tcp ? 
ngx_resolver_send_tcp_query(r, rec, rn->query, rn->qlen) : ngx_resolver_send_udp_query(r, rec, rn->query, rn->qlen); @@ -1764,10 +1791,13 @@ ngx_resolver_process_response(ngx_resolv q = ngx_queue_next(q)) { rn = ngx_queue_data(q, ngx_resolver_node_t, queue); - qident = (rn->query[0] << 8) + rn->query[1]; - - if (qident == ident) { - goto dns_error_name; + + if (rn->query) { + qident = (rn->query[0] << 8) + rn->query[1]; + + if (qident == ident) { + goto dns_error_name; + } } #if (NGX_HAVE_INET6) @@ -3644,7 +3674,7 @@ ngx_resolver_create_name_query(ngx_resol len = sizeof(ngx_resolver_hdr_t) + nlen + sizeof(ngx_resolver_qs_t); #if (NGX_HAVE_INET6) - p = ngx_resolver_alloc(r, r->ipv6 ? len * 2 : len); + p = ngx_resolver_alloc(r, len * (r->ipv4 + r->ipv6)); #else p = ngx_resolver_alloc(r, len); #endif @@ -3653,23 +3683,28 @@ ngx_resolver_create_name_query(ngx_resol } rn->qlen = (u_short) len; - rn->query = p; + + if (r->ipv4) { + rn->query = p; + } #if (NGX_HAVE_INET6) if (r->ipv6) { - rn->query6 = p + len; + rn->query6 = r->ipv4 ? 
(p + len) : p; } #endif query = (ngx_resolver_hdr_t *) p; - ident = ngx_random(); - - ngx_log_debug2(NGX_LOG_DEBUG_CORE, r->log, 0, - "resolve: \"%V\" A %i", name, ident & 0xffff); - - query->ident_hi = (u_char) ((ident >> 8) & 0xff); - query->ident_lo = (u_char) (ident & 0xff); + if (r->ipv4) { + ident = ngx_random(); + + ngx_log_debug2(NGX_LOG_DEBUG_CORE, r->log, 0, + "resolve: \"%V\" A %i", name, ident & 0xffff); + + query->ident_hi = (u_char) ((ident >> 8) & 0xff); + query->ident_lo = (u_char) (ident & 0xff); + } /* recursion query */ query->flags_hi = 1; query->flags_lo = 0; @@ -3730,7 +3765,9 @@ ngx_resolver_create_name_query(ngx_resol p = rn->query6; - ngx_memcpy(p, rn->query, rn->qlen); + if (r->ipv4) { + ngx_memcpy(p, rn->query, rn->qlen); + } query = (ngx_resolver_hdr_t *) p; diff --git a/src/core/ngx_resolver.h b/src/core/ngx_resolver.h --- a/src/core/ngx_resolver.h +++ b/src/core/ngx_resolver.h @@ -175,8 +175,10 @@ struct ngx_resolver_s { ngx_queue_t srv_expire_queue; ngx_queue_t addr_expire_queue; + unsigned ipv4:1; + #if (NGX_HAVE_INET6) - ngx_uint_t ipv6; /* unsigned ipv6:1; */ + unsigned ipv6:1; ngx_rbtree_t addr6_rbtree; ngx_rbtree_node_t addr6_sentinel; ngx_queue_t addr6_resend_queue; From xeioex at nginx.com Wed Feb 16 15:25:11 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 16 Feb 2022 15:25:11 +0000 Subject: [njs] Tests: splitting large import tests into several simple ones. Message-ID: details: https://hg.nginx.org/njs/rev/7a08ed3e9cb8 branches: changeset: 1827:7a08ed3e9cb8 user: Dmitry Volyntsev date: Tue Feb 15 13:17:52 2022 +0000 description: Tests: splitting large import tests into several simple ones. 
diffstat: test/js/import_chain.t.js | 13 ++++++++++++ test/js/import_comma_expression.t.js | 9 ++++++++ test/js/import_empty.t.js | 9 ++++++++ test/js/import_export_comma_expression.t.js | 9 -------- test/js/import_export_empty.t.js | 9 -------- test/js/import_export_expression.t.js | 9 -------- test/js/import_export_multi_default.t.js | 9 -------- test/js/import_export_non_assignment.t.js | 9 -------- test/js/import_export_non_default.t.js | 9 -------- test/js/import_export_object.t.js | 9 -------- test/js/import_export_ref_exception.t.js | 9 -------- test/js/import_export_return.t.js | 9 -------- test/js/import_expression.t.js | 9 ++++++++ test/js/import_function_expression.t.js | 7 ++++++ test/js/import_multi_default.t.js | 9 ++++++++ test/js/import_non_assignment.t.js | 9 ++++++++ test/js/import_non_default.t.js | 9 ++++++++ test/js/import_normal.t.js | 31 ----------------------------- test/js/import_not_enough.t.js | 3 +- test/js/import_object.t.js | 9 ++++++++ test/js/import_ref_exception.t.js | 9 ++++++++ test/js/import_relative_path.t.js | 10 +++++++++ test/js/import_return.t.js | 9 ++++++++ test/js/import_scalar.t.js | 9 ++++++++ test/js/import_singleton.t.js | 16 ++++++++++++++ test/js/module/function_expression.js | 7 ++++++ test/js/module/lib1.js | 17 +-------------- test/js/module/lib3.js | 2 +- test/shell_test.exp | 2 +- 29 files changed, 147 insertions(+), 132 deletions(-) diffs (421 lines): diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_chain.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_chain.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,13 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module/, test/js/module/libs/] +---*/ + +import lib2 from 'lib2.js'; + +import crypto from 'crypto'; +var h = crypto.createHash('md5'); +var hash = h.update('AB').digest('hex'); + +assert.sameValue(lib2.hash(), hash); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_comma_expression.t.js --- /dev/null Thu Jan 01 00:00:00 1970 
+0000 +++ b/test/js/import_comma_expression.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module] +---*/ + +import m from 'export_comma_expression.js'; + +assert.sameValue(m.prod(3,5), 15); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_empty.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_empty.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'empty.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_comma_expression.t.js --- a/test/js/import_export_comma_expression.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module] ----*/ - -import m from 'export_comma_expression.js'; - -assert.sameValue(m.prod(3,5), 15); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_empty.t.js --- a/test/js/import_export_empty.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 'empty.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_expression.t.js --- a/test/js/import_export_expression.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module] ----*/ - -import m from 'export_expression.js'; - -assert.sameValue(m.sum(3,4), 7); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_multi_default.t.js --- a/test/js/import_export_multi_default.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 
'export.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_non_assignment.t.js --- a/test/js/import_export_non_assignment.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 'export_non_assignment.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_non_default.t.js --- a/test/js/import_export_non_default.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 'export_non_default.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_object.t.js --- a/test/js/import_export_object.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module] ----*/ - -import m from 'export_name.js'; - -assert.sameValue(m.prod(3,4), 12); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_ref_exception.t.js --- a/test/js/import_export_ref_exception.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 'ref_exception.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_export_return.t.js --- a/test/js/import_export_return.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module, test/js/module/libs] -negative: - phase: runtime ----*/ - -import m from 'return.js' diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_expression.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_expression.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 
+1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module] +---*/ + +import m from 'export_expression.js'; + +assert.sameValue(m.sum(3,4), 7); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_function_expression.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_function_expression.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,7 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module/] +---*/ + +import _ from 'function_expression.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_multi_default.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_multi_default.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'export.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_non_assignment.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_non_assignment.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'export_non_assignment.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_non_default.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_non_default.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'export_non_default.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_normal.t.js --- a/test/js/import_normal.t.js Mon Feb 14 14:10:59 2022 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module/, test/js/module/libs/] ----*/ - -import name from 'name.js'; -import lib1 from 'lib1.js'; -import lib2 from 'lib2.js'; -import lib1_2 from 'lib1.js'; - -import crypto from 'crypto'; -var h = crypto.createHash('md5'); 
-var hash = h.update('AB').digest('hex'); - -assert.sameValue(name, "name"); - -assert.sameValue(lib1.name, "libs.name"); - -assert.sameValue(lib1.hash(), hash); -assert.sameValue(lib2.hash(), hash); - -assert.sameValue(lib1.get(), 0); - -assert.sameValue(lib1_2.get(), 0); - -lib1.inc(); - -assert.sameValue(lib1.get(), 1); - -assert.sameValue(lib1_2.get(), 1); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_not_enough.t.js --- a/test/js/import_not_enough.t.js Mon Feb 14 14:10:59 2022 +0000 +++ b/test/js/import_not_enough.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -1,10 +1,9 @@ /*--- includes: [] flags: [] -paths: [test/js/module] +paths: [] negative: phase: runtime ---*/ import name from 'name.js'; -import lib1 from 'lib1.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_object.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_object.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module] +---*/ + +import m from 'export_name.js'; + +assert.sameValue(m.prod(3,4), 12); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_ref_exception.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_ref_exception.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'ref_exception.js'; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_relative_path.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_relative_path.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,10 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module/] +---*/ + +import name from 'name.js'; +import hash from 'libs/hash.js'; + +assert.sameValue(hash.name, "libs.name"); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_return.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_return.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- 
+includes: [] +flags: [] +paths: [test/js/module, test/js/module/libs] +negative: + phase: runtime +---*/ + +import m from 'return.js' diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_scalar.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_scalar.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,9 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module/] +---*/ + +import name from 'name.js'; + +assert.sameValue(name, "name"); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/import_singleton.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/import_singleton.t.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,16 @@ +/*--- +includes: [] +flags: [] +paths: [test/js/module/, test/js/module/libs/] +---*/ + +import lib1 from 'lib1.js'; +import lib1_2 from 'lib1.js'; + +assert.sameValue(lib1.get(), 0); +assert.sameValue(lib1_2.get(), 0); + +lib1.inc(); + +assert.sameValue(lib1.get(), 1); +assert.sameValue(lib1_2.get(), 1); diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/module/function_expression.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/module/function_expression.js Tue Feb 15 13:17:52 2022 +0000 @@ -0,0 +1,7 @@ +var foo = (function(){ + return (function f() {}) +}); + +foo()({1:[]}) + +export default {foo}; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/module/lib1.js --- a/test/js/module/lib1.js Mon Feb 14 14:10:59 2022 +0000 +++ b/test/js/module/lib1.js Tue Feb 15 13:17:52 2022 +0000 @@ -1,18 +1,3 @@ -var foo = (function(){ - return (function f() {}) -}); - -foo()({1:[]}) - -function hash() { - var h = crypto.createHash('md5'); - var v = h.update('AB').digest('hex'); - return v; -} - -import hashlib from 'hash.js'; -import crypto from 'crypto'; - var state = {count:0} function inc() { @@ -23,4 +8,4 @@ function get() { return state.count; } -export default {hash, inc, get, name: hashlib.name} +export default {inc, get} diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/js/module/lib3.js --- a/test/js/module/lib3.js Mon Feb 14 14:10:59 
2022 +0000 +++ b/test/js/module/lib3.js Tue Feb 15 13:17:52 2022 +0000 @@ -6,6 +6,6 @@ function exception() { return sub.error(); } -import sub from './sub/sub1.js'; +import sub from 'sub/sub1.js'; export default {hash, exception}; diff -r 26fd49ea3f72 -r 7a08ed3e9cb8 test/shell_test.exp --- a/test/shell_test.exp Mon Feb 14 14:10:59 2022 +0000 +++ b/test/shell_test.exp Tue Feb 15 13:17:52 2022 +0000 @@ -548,7 +548,7 @@ njs_test { # quiet mode -njs_run {"-q" "test/js/import_normal.t.js"} \ +njs_run {"-q" "test/js/import_relative_path.t.js"} \ "SyntaxError: Cannot find module \"name.js\" in 7" # sandboxing From vl at nginx.com Mon Feb 21 11:10:30 2022 From: vl at nginx.com (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 21 Feb 2022 14:10:30 +0300 Subject: [PATCH 0 of 4] [QUIC] avoid pool allocations Message-ID: it is desirable to avoid pool allocations at early stages of quic connection processing. Currently, code in protection.c and tokens.c allocates memory dynamically, while this is not strictly necessary, as allocated objects have fixed size and sometimes short lifetime. The patchset revises this cases and removes pool usage. This patchset prepares base to more lightweight early packet processing (parsing, retry and rejection with error without creating connection object and memory allocations) From vl at nginx.com Mon Feb 21 11:10:31 2022 From: vl at nginx.com (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 21 Feb 2022 14:10:31 +0300 Subject: [PATCH 1 of 4] QUIC: fixed-length buffers for secrets In-Reply-To: References: Message-ID: <1a0a12bef7f00b5422d4.1645441831@vl.krasnogorsk.ru> Patch subject is complete summary. src/event/quic/ngx_event_quic_protection.c | 202 +++++++++++++++------------- 1 files changed, 105 insertions(+), 97 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-quic-1.patch Type: text/x-patch Size: 11683 bytes Desc: not available URL: From vl at nginx.com Mon Feb 21 11:10:32 2022 From: vl at nginx.com (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 21 Feb 2022 14:10:32 +0300 Subject: [PATCH 2 of 4] QUIC: avoided pool usage in ngx_quic_protection.c In-Reply-To: References: Message-ID: <950a45270e862b02f43e.1645441832@vl.krasnogorsk.ru> Patch subject is complete summary. src/event/quic/ngx_event_quic.c | 2 +- src/event/quic/ngx_event_quic_output.c | 2 +- src/event/quic/ngx_event_quic_protection.c | 37 ++++++++++++----------------- src/event/quic/ngx_event_quic_protection.h | 6 ++-- src/event/quic/ngx_event_quic_ssl.c | 8 +++--- 5 files changed, 24 insertions(+), 31 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-quic-2.patch Type: text/x-patch Size: 8294 bytes Desc: not available URL: From vl at nginx.com Mon Feb 21 11:10:33 2022 From: vl at nginx.com (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 21 Feb 2022 14:10:33 +0300 Subject: [PATCH 3 of 4] QUIC: removed ngx_quic_keys_new() In-Reply-To: References: Message-ID: The ngx_quic_keys_t structure is now exposed. This allows to use it in contexts where no pool/connection is available, i.e. early packet processing. src/event/quic/ngx_event_quic.c | 2 +- src/event/quic/ngx_event_quic_output.c | 8 ++-- src/event/quic/ngx_event_quic_protection.c | 53 ------------------------------ src/event/quic/ngx_event_quic_protection.h | 48 ++++++++++++++++++++++++++- 4 files changed, 52 insertions(+), 59 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-quic-3.patch Type: text/x-patch Size: 5613 bytes Desc: not available URL: From vl at nginx.com Mon Feb 21 11:10:34 2022 From: vl at nginx.com (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 21 Feb 2022 14:10:34 +0300 Subject: [PATCH 4 of 4] QUIC: avoided pool usage in token calculation In-Reply-To: References: Message-ID: Patch subject is complete summary. src/event/quic/ngx_event_quic_output.c | 11 +++++++++-- src/event/quic/ngx_event_quic_tokens.c | 22 ++++------------------ src/event/quic/ngx_event_quic_tokens.h | 14 +++++++++++++- src/event/quic/ngx_event_quic_transport.h | 1 + 4 files changed, 27 insertions(+), 21 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-quic-4.patch Type: text/x-patch Size: 5439 bytes Desc: not available URL: From pluknet at nginx.com Mon Feb 21 14:51:42 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 21 Feb 2022 17:51:42 +0300 Subject: [PATCH 1 of 4] QUIC: fixed-length buffers for secrets In-Reply-To: <1a0a12bef7f00b5422d4.1645441831@vl.krasnogorsk.ru> References: <1a0a12bef7f00b5422d4.1645441831@vl.krasnogorsk.ru> Message-ID: <20220221145142.pic3fkrhd6dkm5xa@MacBook-Air-Sergey.local> On Mon, Feb 21, 2022 at 02:10:31PM +0300, Vladimir Homutov wrote: > Patch subject is complete summary. > > > src/event/quic/ngx_event_quic_protection.c | 202 +++++++++++++++------------- > 1 files changed, 105 insertions(+), 97 deletions(-) > > > # HG changeset patch > # User Vladimir Homutov > # Date 1645440604 -10800 > # Mon Feb 21 13:50:04 2022 +0300 > # Branch quic > # Node ID 1a0a12bef7f00b5422d449b2d4642fff39e0a47e > # Parent 55b38514729b8f848709b31295e72d6886a7a433 > QUIC: fixed-length buffers for secrets. 
> > diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c > --- a/src/event/quic/ngx_event_quic_protection.c > +++ b/src/event/quic/ngx_event_quic_protection.c > @@ -17,6 +17,8 @@ > > #define NGX_QUIC_AES_128_KEY_LEN 16 > > +#define NGX_QUIC_KEY_LEN 32 > + > #define NGX_AES_128_GCM_SHA256 0x1301 > #define NGX_AES_256_GCM_SHA384 0x1302 > #define NGX_CHACHA20_POLY1305_SHA256 0x1303 > @@ -30,6 +32,27 @@ > > > typedef struct { > + size_t len; > + u_char data[SHA256_DIGEST_LENGTH]; > +} ngx_quic_okm_t; > + > +typedef struct { > + size_t len; > + u_char data[NGX_QUIC_KEY_LEN]; > +} ngx_quic_key_t; > + > +typedef struct { > + size_t len; > + u_char data[NGX_QUIC_KEY_LEN]; > +} ngx_quic_hp_t; > + > +typedef struct { > + size_t len; > + u_char data[NGX_QUIC_IV_LEN]; > +} ngx_quic_iv_t; Style: two empty lines between struct declarations. > + > + > +typedef struct { > const ngx_quic_cipher_t *c; > const EVP_CIPHER *hp; > const EVP_MD *d; > @@ -37,10 +60,10 @@ typedef struct { > > > typedef struct ngx_quic_secret_s { > - ngx_str_t secret; > - ngx_str_t key; > - ngx_str_t iv; > - ngx_str_t hp; > + ngx_quic_okm_t secret; > + ngx_quic_key_t key; > + ngx_quic_iv_t iv; > + ngx_quic_hp_t hp; > } ngx_quic_secret_t; > > > @@ -57,6 +80,29 @@ struct ngx_quic_keys_s { > }; > > > +typedef struct { > + size_t out_len; > + u_char *out; > + > + size_t prk_len; > + const uint8_t *prk; > + > + size_t label_len; > + const u_char *label; > + > + size_t info_len; > + uint8_t info[20]; > +} ngx_quic_hkdf_t; > + > +#define ngx_quic_hkdf_set(label, out, prk) \ > + { \ > + (out)->len, (out)->data, \ > + (prk)->len, (prk)->data, \ > + (sizeof(label) - 1), (u_char *)(label), \ > + 0, { 0 } \ > + } > + > + > static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, > const EVP_MD *digest, const u_char *prk, size_t prk_len, > const u_char *info, size_t info_len); > @@ -78,8 +124,8 @@ static ngx_int_t ngx_quic_tls_seal(const > ngx_str_t *ad, ngx_log_t 
*log); > static ngx_int_t ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, > ngx_quic_secret_t *s, u_char *out, u_char *in); > -static ngx_int_t ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, > - ngx_str_t *out, ngx_str_t *label, const uint8_t *prk, size_t prk_len); > +static ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, > + const EVP_MD *digest, ngx_pool_t *pool); > > static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, > ngx_str_t *res); > @@ -204,28 +250,20 @@ ngx_quic_keys_set_initial_secret(ngx_poo > client->iv.len = NGX_QUIC_IV_LEN; > server->iv.len = NGX_QUIC_IV_LEN; > > - struct { > - ngx_str_t label; > - ngx_str_t *key; > - ngx_str_t *prk; > - } seq[] = { > + ngx_quic_hkdf_t seq[] = { > /* labels per RFC 9001, 5.1. Packet Protection Keys */ > - { ngx_string("tls13 client in"), &client->secret, &iss }, > - { ngx_string("tls13 quic key"), &client->key, &client->secret }, > - { ngx_string("tls13 quic iv"), &client->iv, &client->secret }, > - { ngx_string("tls13 quic hp"), &client->hp, &client->secret }, > - { ngx_string("tls13 server in"), &server->secret, &iss }, > - { ngx_string("tls13 quic key"), &server->key, &server->secret }, > - { ngx_string("tls13 quic iv"), &server->iv, &server->secret }, > - { ngx_string("tls13 quic hp"), &server->hp, &server->secret }, > + ngx_quic_hkdf_set("tls13 client in", &client->secret, &iss), > + ngx_quic_hkdf_set("tls13 quic key", &client->key, &client->secret), > + ngx_quic_hkdf_set("tls13 quic iv", &client->iv, &client->secret), > + ngx_quic_hkdf_set("tls13 quic hp", &client->hp, &client->secret), > + ngx_quic_hkdf_set("tls13 server in", &server->secret, &iss), > + ngx_quic_hkdf_set("tls13 quic key", &server->key, &server->secret), > + ngx_quic_hkdf_set("tls13 quic iv", &server->iv, &server->secret), > + ngx_quic_hkdf_set("tls13 quic hp", &server->hp, &server->secret), > }; > > for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { > - > - if (ngx_quic_hkdf_expand(pool, digest, 
seq[i].key, &seq[i].label, > - seq[i].prk->data, seq[i].prk->len) > - != NGX_OK) > - { > + if (ngx_quic_hkdf_expand(&seq[i], digest, pool) != NGX_OK) { > return NGX_ERROR; > } > } > @@ -235,40 +273,39 @@ ngx_quic_keys_set_initial_secret(ngx_poo > > > static ngx_int_t > -ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, ngx_str_t *out, > - ngx_str_t *label, const uint8_t *prk, size_t prk_len) > +ngx_quic_hkdf_expand(ngx_quic_hkdf_t *h, const EVP_MD *digest, ngx_pool_t *pool) > { > - size_t info_len; > uint8_t *p; > - uint8_t info[20]; > > - if (out->data == NULL) { > - out->data = ngx_pnalloc(pool, out->len); > - if (out->data == NULL) { > + if (h->out == NULL) { > + h->out = ngx_pnalloc(pool, h->out_len); > + if (h->out == NULL) { > return NGX_ERROR; > } > } > > - info_len = 2 + 1 + label->len + 1; > + h->info_len = 2 + 1 + h->label_len + 1; > > - info[0] = 0; > - info[1] = out->len; > - info[2] = label->len; > - p = ngx_cpymem(&info[3], label->data, label->len); > + h->info[0] = 0; > + h->info[1] = h->out_len; > + h->info[2] = h->label_len; Why? info/info_len aren't used/useful outside of ngx_quic_hkdf_expand(), they are barely one-time local storages to produce traffic secrets. 
> + > + p = ngx_cpymem(&h->info[3], h->label, h->label_len); > *p = '\0'; > > - if (ngx_hkdf_expand(out->data, out->len, digest, > - prk, prk_len, info, info_len) > + if (ngx_hkdf_expand(h->out, h->out_len, digest, > + h->prk, h->prk_len, h->info, h->info_len) > != NGX_OK) > { > ngx_ssl_error(NGX_LOG_INFO, pool->log, 0, > - "ngx_hkdf_expand(%V) failed", label); > + "ngx_hkdf_expand(%*s) failed", h->label_len, h->label); > return NGX_ERROR; > } > > #ifdef NGX_QUIC_DEBUG_CRYPTO > - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pool->log, 0, > - "quic expand %V key len:%uz %xV", label, out->len, out); > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pool->log, 0, > + "quic expand \"%*s\" key len:%uz %*xs", > + h->label_len, h->label, h->out_len, h->out_len, h->out); > #endif > > return NGX_OK; > @@ -652,6 +689,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ > const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) > { > ngx_int_t key_len; > + ngx_str_t secret_str; > ngx_uint_t i; > ngx_quic_secret_t *peer_secret; > ngx_quic_ciphers_t ciphers; > @@ -668,8 +706,9 @@ ngx_quic_keys_set_encryption_secret(ngx_ > return NGX_ERROR; > } > > - peer_secret->secret.data = ngx_pnalloc(pool, secret_len); > - if (peer_secret->secret.data == NULL) { > + if (sizeof(peer_secret->secret.data) < secret_len) { > + ngx_log_error(NGX_LOG_ERR, pool->log, 0, > + "unexpected secret len: %uz", secret_len); > return NGX_ERROR; > } This won't work with cipher suite hash algorithms used to produce HKDF Hash.length (read: secret length) above SHA256_DIGEST_LENGTH, such as TLS_AES_256_GCM_SHA384 or any future TLSv1.3 cipher suites with SHA384 or above. The same for hardcoding NGX_QUIC_KEY_LEN. The error, if ever leave it there, deserves rasing logging level to "alert" as clearly a programmatic error. [..] From xeioex at nginx.com Mon Feb 21 15:06:05 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 21 Feb 2022 15:06:05 +0000 Subject: [njs] Refactoring of user modules importing. 
Message-ID: details: https://hg.nginx.org/njs/rev/77c398f26d7e branches: changeset: 1828:77c398f26d7e user: Dmitry Volyntsev date: Mon Feb 21 14:49:38 2022 +0000 description: Refactoring of user modules importing. Previously, user modules were compiled as anonymous functions in a global scope. This is incorrect, because modules should be compiled in their own scope. In addition, this patch introduces HostResolveImportedModule support. When vm->options.ops->module_loader is provided, a module lookup and compilation is delegated to this callback. This closes #443 issue on Github. diffstat: external/njs_crypto_module.c | 2 +- external/njs_fs_module.c | 2 +- external/njs_query_string_module.c | 2 +- nginx/ngx_http_js_module.c | 20 +- nginx/ngx_stream_js_module.c | 19 +- src/njs.h | 6 + src/njs_buffer.c | 2 +- src/njs_builtin.c | 4 +- src/njs_disassembler.c | 22 +- src/njs_function.c | 2 +- src/njs_generator.c | 87 +--- src/njs_generator.h | 5 +- src/njs_module.c | 481 +++++++--------------------- src/njs_module.h | 13 +- src/njs_parser.c | 238 ++++--------- src/njs_parser.h | 31 +- src/njs_shell.c | 21 +- src/njs_variable.c | 2 +- src/njs_vm.c | 84 ++++- src/njs_vm.h | 1 + src/njs_vmcode.c | 68 ++++ src/njs_vmcode.h | 8 + test/js/import_cyclic.t.js | 11 + test/js/import_expression.t.js | 1 + test/js/import_global_ref.t.js | 13 + test/js/import_global_ref_var.t.js | 10 + test/js/import_order.t.js | 18 + test/js/import_recursive.t.js | 6 +- test/js/import_recursive_early_access.t.js | 9 + test/js/import_recursive_relative.t.js | 9 + test/js/import_sinking_export_default.t.js | 10 + test/js/module/cyclic_a.js | 2 + test/js/module/cyclic_b.js | 2 + test/js/module/export_global_a.js | 5 + test/js/module/http.js | 7 + test/js/module/jwt.js | 7 + test/js/module/lib1.js | 5 + test/js/module/order.js | 7 + test/js/module/order2.js | 7 + test/js/module/recursive.js | 2 + test/js/module/recursive_early_access.js | 5 + test/js/module/recursive_relative.js | 2 +
test/js/module/sinking_export_default.js | 5 + 43 files changed, 598 insertions(+), 665 deletions(-) diffs (truncated from 2098 to 1000 lines): diff -r 7a08ed3e9cb8 -r 77c398f26d7e external/njs_crypto_module.c --- a/external/njs_crypto_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/external/njs_crypto_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -646,7 +646,7 @@ njs_crypto_init(njs_vm_t *vm) return NJS_ERROR; } - module = njs_module_add(vm, &njs_str_value("crypto"), 1); + module = njs_module_add(vm, &njs_str_value("crypto")); if (njs_slow_path(module == NULL)) { return NJS_ERROR; } diff -r 7a08ed3e9cb8 -r 77c398f26d7e external/njs_fs_module.c --- a/external/njs_fs_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/external/njs_fs_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -3090,7 +3090,7 @@ njs_fs_init(njs_vm_t *vm) return NJS_ERROR; } - module = njs_module_add(vm, &njs_str_value("fs"), 1); + module = njs_module_add(vm, &njs_str_value("fs")); if (njs_slow_path(module == NULL)) { return NJS_ERROR; } diff -r 7a08ed3e9cb8 -r 77c398f26d7e external/njs_query_string_module.c --- a/external/njs_query_string_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/external/njs_query_string_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -967,7 +967,7 @@ njs_query_string_init(njs_vm_t *vm) return NJS_ERROR; } - module = njs_module_add(vm, &njs_str_value("querystring"), 1); + module = njs_module_add(vm, &njs_str_value("querystring")); if (njs_slow_path(module == NULL)) { return NJS_ERROR; } diff -r 7a08ed3e9cb8 -r 77c398f26d7e nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/nginx/ngx_http_js_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -704,7 +704,8 @@ static njs_external_t ngx_http_js_ext_r static njs_vm_ops_t ngx_http_js_ops = { ngx_http_js_set_timer, - ngx_http_js_clear_timer + ngx_http_js_clear_timer, + NULL, }; @@ -3490,8 +3491,12 @@ ngx_http_js_init_main_conf(ngx_conf_t *c import = jmcf->imports->elts; for (i = 0; i < jmcf->imports->nelts; i++) { 
- size += sizeof("import from '';\n") - 1 + import[i].name.len - + import[i].path.len; + + /* import from ''; globalThis. = ; */ + + size += sizeof("import from '';") - 1 + import[i].name.len * 3 + + import[i].path.len + + sizeof(" globalThis. = ;\n") - 1; } start = ngx_pnalloc(cf->pool, size); @@ -3502,11 +3507,18 @@ ngx_http_js_init_main_conf(ngx_conf_t *c p = start; import = jmcf->imports->elts; for (i = 0; i < jmcf->imports->nelts; i++) { + + /* import from ''; globalThis. = ; */ + p = ngx_cpymem(p, "import ", sizeof("import ") - 1); p = ngx_cpymem(p, import[i].name.data, import[i].name.len); p = ngx_cpymem(p, " from '", sizeof(" from '") - 1); p = ngx_cpymem(p, import[i].path.data, import[i].path.len); - p = ngx_cpymem(p, "';\n", sizeof("';\n") - 1); + p = ngx_cpymem(p, "'; globalThis.", sizeof("'; globalThis.") - 1); + p = ngx_cpymem(p, import[i].name.data, import[i].name.len); + p = ngx_cpymem(p, " = ", sizeof(" = ") - 1); + p = ngx_cpymem(p, import[i].name.data, import[i].name.len); + p = ngx_cpymem(p, ";\n", sizeof(";\n") - 1); } njs_vm_opt_init(&options); diff -r 7a08ed3e9cb8 -r 77c398f26d7e nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/nginx/ngx_stream_js_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -456,7 +456,8 @@ static njs_external_t ngx_stream_js_ext static njs_vm_ops_t ngx_stream_js_ops = { ngx_stream_js_set_timer, - ngx_stream_js_clear_timer + ngx_stream_js_clear_timer, + NULL, }; @@ -1512,8 +1513,11 @@ ngx_stream_js_init_main_conf(ngx_conf_t import = jmcf->imports->elts; for (i = 0; i < jmcf->imports->nelts; i++) { - size += sizeof("import from '';\n") - 1 + import[i].name.len - + import[i].path.len; + /* import from ''; globalThis. = ; */ + + size += sizeof("import from '';") - 1 + import[i].name.len * 3 + + import[i].path.len + + sizeof(" globalThis. 
= ;\n") - 1; } start = ngx_pnalloc(cf->pool, size); @@ -1524,11 +1528,18 @@ ngx_stream_js_init_main_conf(ngx_conf_t p = start; import = jmcf->imports->elts; for (i = 0; i < jmcf->imports->nelts; i++) { + + /* import from ''; globalThis. = ; */ + p = ngx_cpymem(p, "import ", sizeof("import ") - 1); p = ngx_cpymem(p, import[i].name.data, import[i].name.len); p = ngx_cpymem(p, " from '", sizeof(" from '") - 1); p = ngx_cpymem(p, import[i].path.data, import[i].path.len); - p = ngx_cpymem(p, "';\n", sizeof("';\n") - 1); + p = ngx_cpymem(p, "'; globalThis.", sizeof("'; globalThis.") - 1); + p = ngx_cpymem(p, import[i].name.data, import[i].name.len); + p = ngx_cpymem(p, " = ", sizeof(" = ") - 1); + p = ngx_cpymem(p, import[i].name.data, import[i].name.len); + p = ngx_cpymem(p, ";\n", sizeof(";\n") - 1); } njs_vm_opt_init(&options); diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs.h --- a/src/njs.h Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs.h Mon Feb 21 14:49:38 2022 +0000 @@ -28,6 +28,7 @@ typedef uintptr_t njs_index_t; typedef struct njs_vm_s njs_vm_t; +typedef struct njs_mod_s njs_mod_t; typedef union njs_value_s njs_value_t; typedef struct njs_function_s njs_function_t; typedef struct njs_vm_shared_s njs_vm_shared_t; @@ -183,11 +184,14 @@ typedef njs_host_event_t (*njs_set_timer uint64_t delay, njs_vm_event_t vm_event); typedef void (*njs_event_destructor_t)(njs_external_ptr_t external, njs_host_event_t event); +typedef njs_mod_t *(*njs_module_loader_t)(njs_vm_t *vm, + njs_external_ptr_t external, njs_str_t *name); typedef struct { njs_set_timer_t set_timer; njs_event_destructor_t clear_timer; + njs_module_loader_t module_loader; } njs_vm_ops_t; @@ -254,6 +258,8 @@ NJS_EXPORT njs_vm_t *njs_vm_create(njs_v NJS_EXPORT void njs_vm_destroy(njs_vm_t *vm); NJS_EXPORT njs_int_t njs_vm_compile(njs_vm_t *vm, u_char **start, u_char *end); +NJS_EXPORT njs_mod_t *njs_vm_compile_module(njs_vm_t *vm, njs_str_t *name, + u_char **start, u_char *end); NJS_EXPORT njs_vm_t 
*njs_vm_clone(njs_vm_t *vm, njs_external_ptr_t external); NJS_EXPORT njs_vm_event_t njs_vm_add_event(njs_vm_t *vm, diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_buffer.c --- a/src/njs_buffer.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_buffer.c Mon Feb 21 14:49:38 2022 +0000 @@ -3015,7 +3015,7 @@ njs_buffer_init(njs_vm_t *vm) return NJS_ERROR; } - module = njs_module_add(vm, &njs_str_value("buffer"), 1); + module = njs_module_add(vm, &njs_str_value("buffer")); if (njs_slow_path(module == NULL)) { return NJS_ERROR; } diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_builtin.c --- a/src/njs_builtin.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_builtin.c Mon Feb 21 14:49:38 2022 +0000 @@ -761,7 +761,9 @@ njs_builtin_match_native_function(njs_vm break; } - if (njs_is_object(&module->value)) { + if (njs_is_object(&module->value) + && !njs_object(&module->value)->shared) + { ctx.match = module->name; ret = njs_object_traverse(vm, njs_object(&module->value), &ctx, diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_disassembler.c --- a/src/njs_disassembler.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_disassembler.c Mon Feb 21 14:49:38 2022 +0000 @@ -172,15 +172,6 @@ njs_disassembler(njs_vm_t *vm) n = vm->codes->items; while (n != 0) { - if (code->start == vm->start) { - break; - } - - code++; - n--; - } - - while (n != 0) { njs_printf("%V:%V\n", &code->file, &code->name); njs_disassemble(code); code++; @@ -207,6 +198,7 @@ njs_disassemble(njs_vm_code_t *code) njs_vmcode_3addr_t *code3; njs_vmcode_array_t *array; njs_vmcode_catch_t *catch; + njs_vmcode_import_t *import; njs_vmcode_finally_t *finally; njs_vmcode_try_end_t *try_end; njs_vmcode_move_arg_t *move_arg; @@ -398,6 +390,18 @@ njs_disassemble(njs_vm_code_t *code) continue; } + if (operation == NJS_VMCODE_IMPORT) { + import = (njs_vmcode_import_t *) p; + + njs_printf("%5uD | %05uz IMPORT %04Xz %V\n", + line, p - start, (size_t) import->retval, + &import->module->name); + + p += sizeof(njs_vmcode_import_t); + + continue; + } + 
if (operation == NJS_VMCODE_TRY_START) { try_start = (njs_vmcode_try_start_t *) p; diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_function.c --- a/src/njs_function.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_function.c Mon Feb 21 14:49:38 2022 +0000 @@ -1209,7 +1209,7 @@ njs_function_constructor(njs_vm_t *vm, n } } - ret = njs_generator_init(&generator, 0, 1); + ret = njs_generator_init(&generator, &file, 0, 1); if (njs_slow_path(ret != NJS_OK)) { njs_internal_error(vm, "njs_generator_init() failed"); return NJS_ERROR; diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_generator.c --- a/src/njs_generator.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_generator.c Mon Feb 21 14:49:38 2022 +0000 @@ -319,8 +319,6 @@ static njs_int_t njs_generate_throw_end( njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_import_statement(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); -static njs_int_t njs_generate_import_statement_end(njs_vm_t *vm, - njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_export_statement(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_export_statement_end(njs_vm_t *vm, @@ -424,8 +422,8 @@ static njs_int_t njs_generate_index_rele njs_code_offset_diff(generator, patch->jump_offset) -#define njs_generate_syntax_error(vm, node, fmt, ...) \ - njs_parser_node_error(vm, node, NJS_OBJ_TYPE_SYNTAX_ERROR, fmt, \ +#define njs_generate_syntax_error(vm, node, file, fmt, ...) 
\ + njs_parser_node_error(vm, NJS_OBJ_TYPE_SYNTAX_ERROR, node, file, fmt, \ ##__VA_ARGS__) @@ -436,13 +434,14 @@ static const njs_str_t undef_label = { njs_int_t -njs_generator_init(njs_generator_t *generator, njs_int_t depth, - njs_bool_t runtime) +njs_generator_init(njs_generator_t *generator, njs_str_t *file, + njs_int_t depth, njs_bool_t runtime) { njs_memzero(generator, sizeof(njs_generator_t)); njs_queue_init(&generator->stack); + generator->file = *file; generator->depth = depth; generator->runtime = runtime; @@ -2312,7 +2311,8 @@ njs_generate_continue_statement(njs_vm_t syntax_error: - njs_generate_syntax_error(vm, node, "Illegal continue statement"); + njs_generate_syntax_error(vm, node, &generator->file, + "Illegal continue statement"); return NJS_ERROR; } @@ -2357,7 +2357,8 @@ njs_generate_break_statement(njs_vm_t *v syntax_error: - njs_generate_syntax_error(vm, node, "Illegal break statement"); + njs_generate_syntax_error(vm, node, &generator->file, + "Illegal break statement"); return NJS_ERROR; } @@ -3102,17 +3103,13 @@ njs_generate_function(njs_vm_t *vm, njs_ njs_parser_node_t *node) { njs_int_t ret; - njs_bool_t module; - const njs_str_t *name; njs_function_lambda_t *lambda; njs_vmcode_function_t *function; lambda = node->u.value.data.u.lambda; - module = node->right->scope->module; - - name = module ? 
&njs_entry_module : &njs_entry_anonymous; - - ret = njs_generate_function_scope(vm, generator, lambda, node, name); + + ret = njs_generate_function_scope(vm, generator, lambda, node, + &njs_entry_anonymous); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -3641,13 +3638,11 @@ njs_generate_function_scope(njs_vm_t *vm njs_function_lambda_t *lambda, njs_parser_node_t *node, const njs_str_t *name) { - njs_int_t ret; - njs_arr_t *arr; - njs_bool_t module; - njs_uint_t depth; - njs_vm_code_t *code; - njs_generator_t generator; - njs_parser_node_t *file_node; + njs_int_t ret; + njs_arr_t *arr; + njs_uint_t depth; + njs_vm_code_t *code; + njs_generator_t generator; depth = prev->depth; @@ -3656,7 +3651,7 @@ njs_generate_function_scope(njs_vm_t *vm return NJS_ERROR; } - ret = njs_generator_init(&generator, depth, prev->runtime); + ret = njs_generator_init(&generator, &prev->file, depth, prev->runtime); if (njs_slow_path(ret != NJS_OK)) { njs_internal_error(vm, "njs_generator_init() failed"); return NJS_ERROR; @@ -3673,11 +3668,6 @@ njs_generate_function_scope(njs_vm_t *vm return NJS_ERROR; } - module = node->right->scope->module; - file_node = module ? 
node->right : node; - - code->file = file_node->scope->file; - lambda->start = generator.code_start; lambda->closures = generator.closures->start; lambda->nclosures = generator.closures->items; @@ -3774,7 +3764,7 @@ njs_generate_scope(njs_vm_t *vm, njs_gen code = njs_arr_item(vm->codes, index); code->start = generator->code_start; code->end = generator->code_end; - code->file = scope->file; + code->file = generator->file; code->name = *name; generator->code_size = generator->code_end - generator->code_start; @@ -4620,45 +4610,22 @@ static njs_int_t njs_generate_import_statement(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node) { - njs_variable_t *var; - njs_parser_node_t *lvalue, *expr; + njs_variable_t *var; + njs_parser_node_t *lvalue; + njs_vmcode_import_t *import; lvalue = node->left; - expr = node->right; var = njs_variable_reference(vm, lvalue); if (njs_slow_path(var == NULL)) { return NJS_ERROR; } - if (expr->left != NULL) { - njs_generator_next(generator, njs_generate, expr->left); - - return njs_generator_after(vm, generator, - njs_queue_first(&generator->stack), node, - njs_generate_import_statement_end, NULL, 0); - } - - return njs_generate_import_statement_end(vm, generator, node); -} - - -static njs_int_t -njs_generate_import_statement_end(njs_vm_t *vm, njs_generator_t *generator, - njs_parser_node_t *node) -{ - njs_mod_t *module; - njs_parser_node_t *expr; - njs_vmcode_object_copy_t *copy; - - expr = node->right; - - module = (njs_mod_t *) expr->index; - - njs_generate_code(generator, njs_vmcode_object_copy_t, copy, - NJS_VMCODE_OBJECT_COPY, 2, node); - copy->retval = node->left->index; - copy->object = module->index; + njs_generate_code(generator, njs_vmcode_import_t, import, + NJS_VMCODE_IMPORT, 1, node); + + import->module = node->u.module; + import->retval = lvalue->index; return njs_generator_stack_pop(vm, generator, NULL); } diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_generator.h --- a/src/njs_generator.h Tue Feb 15 13:17:52 
2022 +0000 +++ b/src/njs_generator.h Mon Feb 21 14:49:38 2022 +0000 @@ -27,6 +27,7 @@ struct njs_generator_s { njs_arr_t *index_cache; njs_arr_t *closures; + njs_str_t file; njs_arr_t *lines; size_t code_size; @@ -40,8 +41,8 @@ struct njs_generator_s { }; -njs_int_t njs_generator_init(njs_generator_t *generator, njs_int_t depth, - njs_bool_t runtime); +njs_int_t njs_generator_init(njs_generator_t *generator, njs_str_t *file, + njs_int_t depth, njs_bool_t runtime); njs_vm_code_t *njs_generate_scope(njs_vm_t *vm, njs_generator_t *generator, njs_parser_scope_t *scope, const njs_str_t *name); uint32_t njs_lookup_line(njs_vm_code_t *code, uint32_t offset); diff -r 7a08ed3e9cb8 -r 77c398f26d7e src/njs_module.c --- a/src/njs_module.c Tue Feb 15 13:17:52 2022 +0000 +++ b/src/njs_module.c Mon Feb 21 14:49:38 2022 +0000 @@ -12,286 +12,60 @@ typedef struct { int fd; njs_str_t name; njs_str_t file; + char path[NJS_MAX_PATH + 1]; } njs_module_info_t; -typedef struct { - njs_str_t text; - njs_module_info_t info; - njs_lexer_t *prev; - njs_lexer_t lexer; -} njs_module_temp_t; - - -static njs_int_t njs_parser_module_lambda_after(njs_parser_t *parser, - njs_lexer_token_t *token, njs_queue_link_t *current); -static njs_int_t njs_parser_module_after(njs_parser_t *parser, - njs_lexer_token_t *token, njs_queue_link_t *current); - static njs_int_t njs_module_lookup(njs_vm_t *vm, const njs_str_t *cwd, njs_module_info_t *info); -static njs_int_t njs_module_relative_path(njs_vm_t *vm, - const njs_str_t *dir, njs_module_info_t *info); -static njs_int_t njs_module_absolute_path(njs_vm_t *vm, +static njs_int_t njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info); -static njs_bool_t njs_module_realpath_equal(const njs_str_t *path1, - const njs_str_t *path2); static njs_int_t njs_module_read(njs_vm_t *vm, int fd, njs_str_t *body); -static njs_mod_t *njs_module_find(njs_vm_t *vm, njs_str_t *name, - njs_bool_t local); -static njs_int_t njs_module_insert(njs_parser_t 
*parser, njs_mod_t *module); - - -njs_int_t -njs_module_load(njs_vm_t *vm) -{ - njs_int_t ret; - njs_mod_t **item, *module; - njs_uint_t i; - njs_value_t *value; - njs_object_t *object; - - if (vm->modules == NULL) { - return NJS_OK; - } - - item = vm->modules->start; - - for (i = 0; i < vm->modules->items; i++) { - module = *item; - - if (module->function.native) { - value = njs_scope_valid_value(vm, module->index); - njs_value_assign(value, &module->value); - - object = njs_object_value_copy(vm, value); - if (njs_slow_path(object == NULL)) { - return NJS_ERROR; - } - - } else { - ret = njs_vm_invoke(vm, &module->function, NULL, 0, - njs_scope_valid_value(vm, module->index)); - if (ret == NJS_ERROR) { - return ret; - } - } - - item++; - } - - return NJS_OK; -} +static njs_mod_t *njs_default_module_loader(njs_vm_t *vm, + njs_external_ptr_t external, njs_str_t *name); -void -njs_module_reset(njs_vm_t *vm) -{ - njs_mod_t **item, *module; - njs_uint_t i; - njs_lvlhsh_query_t lhq; - - if (vm->modules == NULL) { - return; - } - - item = vm->modules->start; - - for (i = 0; i < vm->modules->items; i++) { - module = *item; - - if (!module->function.native) { - lhq.key = module->name; - lhq.key_hash = njs_djb_hash(lhq.key.start, lhq.key.length); - lhq.proto = &njs_modules_hash_proto; - lhq.pool = vm->mem_pool; - - (void) njs_lvlhsh_delete(&vm->modules_hash, &lhq); - } - - item++; - } - - njs_arr_reset(vm->modules); -} - - -njs_int_t -njs_parser_module(njs_parser_t *parser, njs_lexer_token_t *token, - njs_queue_link_t *current) +njs_mod_t * +njs_parser_module(njs_parser_t *parser, njs_str_t *name) { - njs_int_t ret; - njs_str_t name, text; - njs_mod_t *module; - njs_module_temp_t *temp; - njs_module_info_t info; - - name = token->text; - - parser->node = NULL; - - module = njs_module_find(parser->vm, &name, 1); - if (module != NULL && module->function.native) { - njs_lexer_consume_token(parser->lexer, 1); - - parser->target = (njs_parser_node_t *) module; + njs_mod_t 
*module; + njs_vm_t *vm; + njs_external_ptr_t external; + njs_module_loader_t loader; - return njs_parser_module_after(parser, token, current); - } - - njs_memzero(&text, sizeof(njs_str_t)); - - if (parser->vm->options.sandbox || name.length == 0) { - njs_parser_syntax_error(parser, "Cannot find module \"%V\"", &name); - goto fail; - } + vm = parser->vm; - /* Non-native module. */ - - njs_memzero(&info, sizeof(njs_module_info_t)); - - info.name = name; - - ret = njs_module_lookup(parser->vm, &parser->scope->cwd, &info); - if (njs_slow_path(ret != NJS_OK)) { - njs_parser_syntax_error(parser, "Cannot find module \"%V\"", &name); - goto fail; + if (name->length == 0) { + njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); + return NULL; } - module = njs_module_find(parser->vm, &info.file, 1); + module = njs_module_find(vm, name, 1); if (module != NULL) { - (void) close(info.fd); - njs_lexer_consume_token(parser->lexer, 1); - - parser->target = (njs_parser_node_t *) module; - - return njs_parser_module_after(parser, token, current); - } - - ret = njs_module_read(parser->vm, info.fd, &text); - - (void) close(info.fd); - - if (njs_slow_path(ret != NJS_OK)) { - njs_internal_error(parser->vm, "while reading \"%V\" module", - &info.file); - goto fail; - } - - if (njs_module_realpath_equal(&parser->lexer->file, &info.file)) { - njs_parser_syntax_error(parser, "Cannot import itself \"%V\"", - &info.file); - goto fail; + goto done; } - temp = njs_mp_alloc(parser->vm->mem_pool, sizeof(njs_module_temp_t)); - if (njs_slow_path(temp == NULL)) { - return NJS_ERROR; - } - - ret = njs_lexer_init(parser->vm, &temp->lexer, &info.file, text.start, - text.start + text.length, 0); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - - njs_lexer_consume_token(parser->lexer, 1); + external = parser; + loader = njs_default_module_loader; - temp->prev = parser->lexer; - temp->info = info; - temp->text = text; - - parser->lexer = &temp->lexer; - - 
njs_parser_next(parser, njs_parser_module_lambda); - - return njs_parser_after(parser, current, temp, 0, - njs_parser_module_lambda_after); - -fail: - - if (text.start != NULL) { - njs_mp_free(parser->vm->mem_pool, text.start); + if (vm->options.ops != NULL && vm->options.ops->module_loader != NULL) { + loader = vm->options.ops->module_loader; + external = vm->external; } - return NJS_ERROR; -} - - -static njs_int_t -njs_parser_module_lambda_after(njs_parser_t *parser, njs_lexer_token_t *token, - njs_queue_link_t *current) -{ - njs_mod_t *module; - njs_module_temp_t *temp; - - temp = (njs_module_temp_t *) parser->target; - - if (parser->ret != NJS_OK) { - njs_mp_free(parser->vm->mem_pool, temp->text.start); - njs_mp_free(parser->vm->mem_pool, temp); - - if (token->type == NJS_TOKEN_END) { - return njs_parser_stack_pop(parser); - } - - return njs_parser_failed(parser); - } - - module = njs_module_add(parser->vm, &temp->info.file, 0); - if (njs_slow_path(module == NULL)) { - parser->lexer = temp->prev; - - if (temp->text.start != NULL) { - njs_mp_free(parser->vm->mem_pool, temp->text.start); - } - - return njs_parser_failed(parser); + module = loader(vm, external, name); + if (module == NULL) { + njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); + return NULL; } - module->function.args_offset = 1; - module->function.u.lambda = parser->node->u.value.data.u.lambda; - - njs_mp_free(parser->vm->mem_pool, temp->text.start); - - parser->lexer = temp->prev; - parser->target = (njs_parser_node_t *) module; - - njs_mp_free(parser->vm->mem_pool, temp); - - return njs_parser_module_after(parser, token, current); -} +done: - -static njs_int_t -njs_parser_module_after(njs_parser_t *parser, njs_lexer_token_t *token, - njs_queue_link_t *current) -{ - njs_int_t ret; - njs_mod_t *module; - njs_parser_node_t *node; - - node = njs_parser_node_new(parser, 0); - if (njs_slow_path(node == NULL)) { - return NJS_ERROR; + if (module->index == 0) { + module->index = 
vm->shared->module_items++; } - node->left = parser->node; - - module = (njs_mod_t *) parser->target; - - if (module->index == 0) { - ret = njs_module_insert(parser, module); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - } - - node->index = (njs_index_t) module; - - parser->node = node; - - return njs_parser_stack_pop(parser); + return module; } @@ -303,10 +77,10 @@ njs_module_lookup(njs_vm_t *vm, const nj njs_uint_t i; if (info->name.start[0] == '/') { - return njs_module_absolute_path(vm, info); + return njs_module_path(vm, NULL, info); } - ret = njs_module_relative_path(vm, cwd, info); + ret = njs_module_path(vm, cwd, info); if (ret != NJS_DECLINED) { return ret; @@ -319,7 +93,7 @@ njs_module_lookup(njs_vm_t *vm, const nj path = vm->paths->start; for (i = 0; i < vm->paths->items; i++) { - ret = njs_module_relative_path(vm, path, info); + ret = njs_module_path(vm, path, info); if (ret != NJS_DECLINED) { return ret; @@ -333,74 +107,60 @@ njs_module_lookup(njs_vm_t *vm, const nj static njs_int_t -njs_module_absolute_path(njs_vm_t *vm, njs_module_info_t *info) +njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info) { - njs_str_t file; + char *p; + size_t length; + njs_bool_t trail; + char src[NJS_MAX_PATH + 1]; + + trail = 0; + length = info->name.length; + + if (dir != NULL) { + length = dir->length; - file.length = info->name.length; - file.start = njs_mp_alloc(vm->mem_pool, file.length + 1); - if (njs_slow_path(file.start == NULL)) { + if (length == 0) { + return NJS_DECLINED; + } + + trail = (dir->start[dir->length - 1] != '/'); + + if (trail) { + length++; + } + } + + if (njs_slow_path(length > NJS_MAX_PATH)) { return NJS_ERROR; } - memcpy(file.start, info->name.start, file.length); - file.start[file.length] = '\0'; + p = &src[0]; + + if (dir != NULL) { + p = (char *) njs_cpymem(p, dir->start, dir->length); - info->fd = open((char *) file.start, O_RDONLY); - if (info->fd < 0) { - njs_mp_free(vm->mem_pool, file.start); + 
if (trail) { + *p++ = '/'; + } + } + + p = (char *) njs_cpymem(p, info->name.start, info->name.length); + *p = '\0'; + + p = realpath(&src[0], &info->path[0]); + if (p == NULL) { return NJS_DECLINED; } - info->file = file; - - return NJS_OK; -} - - -static njs_int_t -njs_module_relative_path(njs_vm_t *vm, const njs_str_t *dir, - njs_module_info_t *info) -{ - u_char *p; - njs_str_t file; - njs_bool_t trail; - - file.length = dir->length; - - if (file.length == 0) { + info->fd = open(&info->path[0], O_RDONLY); + if (info->fd < 0) { return NJS_DECLINED; } - trail = (dir->start[dir->length - 1] != '/'); - if (trail) { - file.length++; - } - - file.length += info->name.length; - - file.start = njs_mp_alloc(vm->mem_pool, file.length + 1); - if (njs_slow_path(file.start == NULL)) { - return NJS_ERROR; - } - - p = njs_cpymem(file.start, dir->start, dir->length); - - if (trail) { - *p++ = '/'; - } - - p = njs_cpymem(p, info->name.start, info->name.length); - *p = '\0'; - - info->fd = open((char *) file.start, O_RDONLY); - if (info->fd < 0) { - njs_mp_free(vm->mem_pool, file.start); - return NJS_DECLINED; - } - - info->file = file; + info->file.start = (u_char *) &info->path[0]; + info->file.length = njs_strlen(info->file.start); return NJS_OK; } @@ -412,6 +172,8 @@ njs_module_read(njs_vm_t *vm, int fd, nj ssize_t n; struct stat sb; + text->start = NULL; + if (fstat(fd, &sb) == -1) { goto fail; } @@ -445,18 +207,6 @@ fail: } -static njs_bool_t -njs_module_realpath_equal(const njs_str_t *path1, const njs_str_t *path2) -{ - char rpath1[MAXPATHLEN], rpath2[MAXPATHLEN]; - - realpath((char *) path1->start, rpath1); - realpath((char *) path2->start, rpath2); - - return (strcmp(rpath1, rpath2) == 0); -} - - static njs_int_t njs_module_hash_test(njs_lvlhsh_query_t *lhq, void *data) { @@ -482,7 +232,7 @@ const njs_lvlhsh_proto_t njs_modules_ha }; -static njs_mod_t * +njs_mod_t * njs_module_find(njs_vm_t *vm, njs_str_t *name, njs_bool_t shared) { njs_int_t ret; @@ -533,11 +283,10 @@ 
njs_module_find(njs_vm_t *vm, njs_str_t njs_mod_t * -njs_module_add(njs_vm_t *vm, njs_str_t *name, njs_bool_t shared) +njs_module_add(njs_vm_t *vm, njs_str_t *name) { njs_int_t ret; njs_mod_t *module; - njs_lvlhsh_t *hash; njs_lvlhsh_query_t lhq; module = njs_mp_zalloc(vm->mem_pool, sizeof(njs_mod_t)); @@ -559,9 +308,7 @@ njs_module_add(njs_vm_t *vm, njs_str_t * lhq.pool = vm->mem_pool; lhq.proto = &njs_modules_hash_proto; - hash = shared ? &vm->shared->modules_hash : &vm->modules_hash; - - ret = njs_lvlhsh_insert(hash, &lhq); + ret = njs_lvlhsh_insert(&vm->shared->modules_hash, &lhq); if (njs_fast_path(ret == NJS_OK)) { return module; } @@ -575,38 +322,6 @@ njs_module_add(njs_vm_t *vm, njs_str_t * } -static njs_int_t -njs_module_insert(njs_parser_t *parser, njs_mod_t *module) -{ - njs_vm_t *vm; - njs_mod_t **value; - njs_parser_scope_t *scope; - - scope = njs_parser_global_scope(parser); - vm = parser->vm; - - module->index = njs_scope_index(scope->type, scope->items, NJS_LEVEL_LOCAL, - NJS_VARIABLE_VAR); - scope->items++; - - if (vm->modules == NULL) { - vm->modules = njs_arr_create(vm->mem_pool, 4, sizeof(njs_mod_t *)); - if (njs_slow_path(vm->modules == NULL)) { - return NJS_ERROR; - } - } - - value = njs_arr_add(vm->modules); From xeioex at nginx.com Mon Feb 21 16:54:58 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 21 Feb 2022 16:54:58 +0000 Subject: [njs] Fixed Array.prototype.concat() when "this" is a slow array. Message-ID: details: https://hg.nginx.org/njs/rev/5064e36ad7b9 branches: changeset: 1829:5064e36ad7b9 user: Dmitry Volyntsev date: Mon Feb 21 16:52:47 2022 +0000 description: Fixed Array.prototype.concat() when "this" is a slow array. Previously, when the current appended element is fast array the "this" array was expected to always be a fast array also. This may not be the case when the previous appended element was not fast thus converting the "this" array to a slow form. 
Previous fix introduced in 2c1382bab643 (0.7.2) was not complete, the correct fix is to never assume "this" is fast, whereas njs_array_add() may only be called with fast arrays. This closes #471 issue in Github. diffstat: src/njs_array.c | 9 ++++++--- src/test/njs_unit_test.c | 5 +++++ 2 files changed, 11 insertions(+), 3 deletions(-) diffs (41 lines): diff -r 77c398f26d7e -r 5064e36ad7b9 src/njs_array.c --- a/src/njs_array.c Mon Feb 21 14:49:38 2022 +0000 +++ b/src/njs_array.c Mon Feb 21 16:52:47 2022 +0000 @@ -364,6 +364,8 @@ njs_array_expand(njs_vm_t *vm, njs_array uint64_t size; njs_value_t *start, *old; + njs_assert(array->object.fast_array); + free_before = array->start - array->data; free_after = array->size - array->length - free_before; @@ -1754,9 +1756,10 @@ njs_array_prototype_concat(njs_vm_t *vm, njs_set_invalid(&retval); } - ret = njs_array_add(vm, array, &retval); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; + ret = njs_value_property_i64_set(vm, &this, length, + &retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; } } diff -r 77c398f26d7e -r 5064e36ad7b9 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Feb 21 14:49:38 2022 +0000 +++ b/src/test/njs_unit_test.c Mon Feb 21 16:52:47 2022 +0000 @@ -4253,6 +4253,11 @@ static njs_unit_test_t njs_test[] = "njs.dump([a[0], a[33],a.length])"), njs_str("[1,1,65]") }, + { njs_str("var a = [1]; a[1111111] = 2;" + "var a2 = [3].concat(a, [4]);" + "njs.dump(a2)"), + njs_str("[3,1,<1111110 empty items>,2,4]") }, + { njs_str("var re = /abc/; re[Symbol.isConcatSpreadable] = true;" "re[0] = 1, re[1] = 2, re[2] = 3, re.length = 3;" "[].concat(re)"), From xeioex at nginx.com Mon Feb 21 16:55:00 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 21 Feb 2022 16:55:00 +0000 Subject: [njs] Fixed frame allocation from an awaited frame. 
Message-ID: details: https://hg.nginx.org/njs/rev/eb8689f0a850 branches: changeset: 1830:eb8689f0a850 user: Dmitry Volyntsev date: Mon Feb 21 16:52:59 2022 +0000 description: Fixed frame allocation from an awaited frame. njs_function_frame_save() is used to save the awaited frame when "await" instruction is encountered. The saving was done as a memcpy() of existing runtime frame. njs_function_frame_alloc() is used to alloc a new function frame, this function tries to use a spare preallocated memory from the previous frame first. Previously, this function might result in "use-after-free" when invoked from a restored frame saved with njs_function_frame_save(). Because njs_function_frame_save() left pointers to the spare memory of the original frame which may be already free when saved frame is restored. The fix is to erase fields for the spare memory from the saved frame. This closes #469 issue on Github. diffstat: src/njs_function.c | 4 ++++ test/js/async_recursive_large.t.js | 26 ++++++++++++++++++++++++++ test/js/async_recursive_mid.t.js | 2 +- 3 files changed, 31 insertions(+), 1 deletions(-) diffs (59 lines): diff -r 5064e36ad7b9 -r eb8689f0a850 src/njs_function.c --- a/src/njs_function.c Mon Feb 21 16:52:47 2022 +0000 +++ b/src/njs_function.c Mon Feb 21 16:52:59 2022 +0000 @@ -811,9 +811,13 @@ njs_function_frame_save(njs_vm_t *vm, nj njs_native_frame_t *active, *native; *frame = *vm->active_frame; + frame->previous_active_frame = NULL; native = &frame->native; + native->size = 0; + native->free = NULL; + native->free_size = 0; active = &vm->active_frame->native; value_count = njs_function_frame_value_count(active); diff -r 5064e36ad7b9 -r eb8689f0a850 test/js/async_recursive_large.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_recursive_large.t.js Mon Feb 21 16:52:59 2022 +0000 @@ -0,0 +1,26 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +let stages = []; + +async function f(v) { + if (v == 1000) { + return; + } + + 
stages.push(`f>${v}`); + + await "X"; + + await f(v + 1); + + stages.push(`f<${v}`); +} + +f(0) +.then(v => { + assert.sameValue(stages.length, 2000); +}) +.then($DONE, $DONE); diff -r 5064e36ad7b9 -r eb8689f0a850 test/js/async_recursive_mid.t.js --- a/test/js/async_recursive_mid.t.js Mon Feb 21 16:52:47 2022 +0000 +++ b/test/js/async_recursive_mid.t.js Mon Feb 21 16:52:59 2022 +0000 @@ -6,7 +6,7 @@ flags: [async] let stages = []; async function f(v) { - if (v == 3) { + if (v == 1000) { return; } From xeioex at nginx.com Mon Feb 21 16:55:02 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 21 Feb 2022 16:55:02 +0000 Subject: [njs] Fixed allocation of large array literals. Message-ID: details: https://hg.nginx.org/njs/rev/805c1c96a2d2 branches: changeset: 1831:805c1c96a2d2 user: Dmitry Volyntsev date: Mon Feb 21 16:53:16 2022 +0000 description: Fixed allocation of large array literals. Previously, allocation of large array literals may result in null-pointer dereference. The reason is that njs_array_alloc() may return a slow array when size is large enough, but the instruction code assumes that array is always flat. The fix is to check fast_array flag before accessing array->start. This closes #473 issue on Github. diffstat: src/njs_vmcode.c | 18 ++++++++++-------- src/test/njs_unit_test.c | 4 ++++ 2 files changed, 14 insertions(+), 8 deletions(-) diffs (42 lines): diff -r eb8689f0a850 -r 805c1c96a2d2 src/njs_vmcode.c --- a/src/njs_vmcode.c Mon Feb 21 16:52:59 2022 +0000 +++ b/src/njs_vmcode.c Mon Feb 21 16:53:16 2022 +0000 @@ -1055,14 +1055,16 @@ njs_vmcode_array(njs_vm_t *vm, u_char *p if (code->ctor) { /* Array of the form [,,,], [1,,]. 
*/ - value = array->start; - length = array->length; - - do { - njs_set_invalid(value); - value++; - length--; - } while (length != 0); + if (array->object.fast_array) { + value = array->start; + length = array->length; + + do { + njs_set_invalid(value); + value++; + length--; + } while (length != 0); + } } else { /* Array of the form [], [,,1], [1,2,3]. */ diff -r eb8689f0a850 -r 805c1c96a2d2 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Feb 21 16:52:59 2022 +0000 +++ b/src/test/njs_unit_test.c Mon Feb 21 16:53:16 2022 +0000 @@ -13154,6 +13154,10 @@ static njs_unit_test_t njs_test[] = { njs_str("(new Function('return 5' + '** 1'.repeat(2**13)))()"), njs_str("5") }, + { njs_str("var a = (new Function('return [' + ','.repeat(2**16) + ']'))();" + "njs.dump(a)"), + njs_str("[<65536 empty items>]") }, + { njs_str("(new Function('var a = 7; return a' + '= a'.repeat(2**13)))()"), njs_str("7") }, From me at davidte.ch Mon Feb 21 21:43:38 2022 From: me at davidte.ch (David Hu) Date: Mon, 21 Feb 2022 13:43:38 -0800 Subject: Clients fail to connect via HTTP3 over QUIC Message-ID: <629715c2-ddb0-8035-6096-01551b5f2da9@davidte.ch> I have compiled the latest master branch of nginx-quic with these options: nginx version: nginx/1.21.7 (8861:b5c87e0e57ef) built with OpenSSL 3.0.1+quic 14 Dec 2021 TLS SNI support enabled configure arguments: --prefix=/usr/local/nginx --build=8861:b5c87e0e57ef --with-debug --with-http_ssl_module --with-http_v2_module --with-stream_quic_module --with-http_v3_module --with-cc-opt='-I/usr/local/include/openssl -O0 -DNGX_HTTP_V3_HQ=1' --with-ld-opt=-L/usr/local/lib64 and OpenSSL version (quictls): OpenSSL 3.0.1+quic 14 Dec 2021 (Library: OpenSSL 3.0.1+quic 14 Dec 2021) built on: Sun Feb 20 01:43:12 2022 UTC platform: linux-x86_64 options: bn(64,64) compiler: gcc -fPIC -pthread -m64 -Wa,--noexecstack -Wall -O3 -DOPENSSL_USE_NODELETE -DL_ENDIAN -DOPENSSL_PIC -DOPENSSL_BUILDING_OPENSSL -DNDEBUG -DOPENSSL_TLS_SECURITY_LEVEL=2 OPENSSLDIR: 
"/usr/local/ssl" ENGINESDIR: "/usr/local/lib64/engines-81.3" MODULESDIR: "/usr/local/lib64/ossl-modules" Seeding source: os-specific CPUINFO: OPENSSL_ia32cap=0xfff83203078bffff:0x4219c01ab And my nginx config file http block looks like this: [redacted sensitive configs] http { [redacted some configs] quic_retry on; http3_push on; http3_hq on; } However clients cannot cannot to my server either through H3 or HQ anymore Wireshark shows handshake failure CONNECTION_CLOSE (Transport) Error code: CRYPTO_ERROR (No application Protocol) Frame Type: CONNECTION_CLOSE (Transport) (0x000000000000001c) Error code: CRYPTO_ERROR (376) TLS Alert Description: No application Protocol (120) Frame Type: 0 Reason phrase Length: 16 Reason phrase: handshake failed How am I supposed to solve this? From gaoyan09 at baidu.com Tue Feb 22 04:55:02 2022 From: gaoyan09 at baidu.com (=?utf-8?B?R2FvLFlhbijlqpLkvZPkupEp?=) Date: Tue, 22 Feb 2022 04:55:02 +0000 Subject: [HTTP/2] Why HTTP/2 do not send goaway when ngx_exiting Message-ID: <10654E11-208A-4BED-8F4C-C934A3582831@baidu.com> Hi HTTP/2 follow the keepalive config and ignore ngx_exiting, but HTTP/1 and upstream not Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From vl at nginx.com Tue Feb 22 06:41:06 2022 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 22 Feb 2022 09:41:06 +0300 Subject: Clients fail to connect via HTTP3 over QUIC In-Reply-To: <629715c2-ddb0-8035-6096-01551b5f2da9@davidte.ch> References: <629715c2-ddb0-8035-6096-01551b5f2da9@davidte.ch> Message-ID: 22.02.2022 00:43, David Hu via nginx-devel пишет: > I have compiled the latest master branch of nginx-quic with these options: > > nginx version: nginx/1.21.7 (8861:b5c87e0e57ef) > built with OpenSSL 3.0.1+quic 14 Dec 2021 > TLS SNI support enabled > configure arguments: --prefix=/usr/local/nginx --build=8861:b5c87e0e57ef > --with-debug --with-http_ssl_module --with-http_v2_module > --with-stream_quic_module --with-http_v3_module > --with-cc-opt='-I/usr/local/include/openssl -O0 -DNGX_HTTP_V3_HQ=1' > --with-ld-opt=-L/usr/local/lib64 > > and OpenSSL version (quictls): > OpenSSL 3.0.1+quic 14 Dec 2021 (Library: OpenSSL 3.0.1+quic 14 Dec 2021) > built on: Sun Feb 20 01:43:12 2022 UTC > platform: linux-x86_64 > options:  bn(64,64) > compiler: gcc -fPIC -pthread -m64 -Wa,--noexecstack -Wall -O3 > -DOPENSSL_USE_NODELETE -DL_ENDIAN -DOPENSSL_PIC > -DOPENSSL_BUILDING_OPENSSL -DNDEBUG -DOPENSSL_TLS_SECURITY_LEVEL=2 > OPENSSLDIR: "/usr/local/ssl" > ENGINESDIR: "/usr/local/lib64/engines-81.3" > MODULESDIR: "/usr/local/lib64/ossl-modules" > Seeding source: os-specific > CPUINFO: OPENSSL_ia32cap=0xfff83203078bffff:0x4219c01ab > > > And my nginx config file http block looks like this: > [redacted sensitive configs] > http { >     [redacted some configs] >     quic_retry on; >     http3_push on; >     http3_hq on; > } > > However clients cannot cannot to my server either through H3 or HQ anymore > > Wireshark shows handshake failure > CONNECTION_CLOSE (Transport) Error code: CRYPTO_ERROR (No application > Protocol) >     Frame Type: CONNECTION_CLOSE (Transport) (0x000000000000001c) >     Error code: CRYPTO_ERROR (376) >     TLS Alert Description: No application 
Protocol (120) >     Frame Type: 0 >     Reason phrase Length: 16 >     Reason phrase: handshake failed > > > How am I supposed to solve this? First, check the logs, the error should be logged. Message supposes your client did not send proper protocol (or no ALPN at all). We've recently removed draft version suppoort (http://hg.nginx.org/nginx-quic/rev/d8865baab732), so now only quic v1 is supported, and "h3" should be used for application protocol. You may want to check your configuration for 'Alt-Svc' header. From vl at nginx.com Tue Feb 22 10:23:27 2022 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 22 Feb 2022 13:23:27 +0300 Subject: [PATCH 1 of 4] QUIC: fixed-length buffers for secrets In-Reply-To: <20220221145142.pic3fkrhd6dkm5xa@MacBook-Air-Sergey.local> References: <1a0a12bef7f00b5422d4.1645441831@vl.krasnogorsk.ru> <20220221145142.pic3fkrhd6dkm5xa@MacBook-Air-Sergey.local> Message-ID: On Mon, Feb 21, 2022 at 05:51:42PM +0300, Sergey Kandaurov wrote: > On Mon, Feb 21, 2022 at 02:10:31PM +0300, Vladimir Homutov wrote: > > Patch subject is complete summary. > > > > > > src/event/quic/ngx_event_quic_protection.c | 202 +++++++++++++++------------- > > 1 files changed, 105 insertions(+), 97 deletions(-) > > > > > > > # HG changeset patch > > # User Vladimir Homutov > > # Date 1645440604 -10800 > > # Mon Feb 21 13:50:04 2022 +0300 > > # Branch quic > > # Node ID 1a0a12bef7f00b5422d449b2d4642fff39e0a47e > > # Parent 55b38514729b8f848709b31295e72d6886a7a433 > > QUIC: fixed-length buffers for secrets. 
> > > > diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c > > --- a/src/event/quic/ngx_event_quic_protection.c > > +++ b/src/event/quic/ngx_event_quic_protection.c > > @@ -17,6 +17,8 @@ > > > > #define NGX_QUIC_AES_128_KEY_LEN 16 > > > > +#define NGX_QUIC_KEY_LEN 32 > > + > > #define NGX_AES_128_GCM_SHA256 0x1301 > > #define NGX_AES_256_GCM_SHA384 0x1302 > > #define NGX_CHACHA20_POLY1305_SHA256 0x1303 > > @@ -30,6 +32,27 @@ > > > > > > typedef struct { > > + size_t len; > > + u_char data[SHA256_DIGEST_LENGTH]; > > +} ngx_quic_okm_t; > > + > > +typedef struct { > > + size_t len; > > + u_char data[NGX_QUIC_KEY_LEN]; > > +} ngx_quic_key_t; > > + > > +typedef struct { > > + size_t len; > > + u_char data[NGX_QUIC_KEY_LEN]; > > +} ngx_quic_hp_t; > > + > > +typedef struct { > > + size_t len; > > + u_char data[NGX_QUIC_IV_LEN]; > > +} ngx_quic_iv_t; > > Style: two empty lines between struct declarations. thanks, fixed this > > > + > > + > > +typedef struct { > > const ngx_quic_cipher_t *c; > > const EVP_CIPHER *hp; > > const EVP_MD *d; > > @@ -37,10 +60,10 @@ typedef struct { > > > > > > typedef struct ngx_quic_secret_s { > > - ngx_str_t secret; > > - ngx_str_t key; > > - ngx_str_t iv; > > - ngx_str_t hp; > > + ngx_quic_okm_t secret; > > + ngx_quic_key_t key; > > + ngx_quic_iv_t iv; > > + ngx_quic_hp_t hp; > > } ngx_quic_secret_t; > > > > > > @@ -57,6 +80,29 @@ struct ngx_quic_keys_s { > > }; > > > > > > +typedef struct { > > + size_t out_len; > > + u_char *out; > > + > > + size_t prk_len; > > + const uint8_t *prk; > > + > > + size_t label_len; > > + const u_char *label; > > + > > + size_t info_len; > > + uint8_t info[20]; > > +} ngx_quic_hkdf_t; > > + > > +#define ngx_quic_hkdf_set(label, out, prk) \ > > + { \ > > + (out)->len, (out)->data, \ > > + (prk)->len, (prk)->data, \ > > + (sizeof(label) - 1), (u_char *)(label), \ > > + 0, { 0 } \ > > + } > > + > > + > > static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t 
out_len, > > const EVP_MD *digest, const u_char *prk, size_t prk_len, > > const u_char *info, size_t info_len); > > @@ -78,8 +124,8 @@ static ngx_int_t ngx_quic_tls_seal(const > > ngx_str_t *ad, ngx_log_t *log); > > static ngx_int_t ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, > > ngx_quic_secret_t *s, u_char *out, u_char *in); > > -static ngx_int_t ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, > > - ngx_str_t *out, ngx_str_t *label, const uint8_t *prk, size_t prk_len); > > +static ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, > > + const EVP_MD *digest, ngx_pool_t *pool); > > > > static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, > > ngx_str_t *res); > > @@ -204,28 +250,20 @@ ngx_quic_keys_set_initial_secret(ngx_poo > > client->iv.len = NGX_QUIC_IV_LEN; > > server->iv.len = NGX_QUIC_IV_LEN; > > > > - struct { > > - ngx_str_t label; > > - ngx_str_t *key; > > - ngx_str_t *prk; > > - } seq[] = { > > + ngx_quic_hkdf_t seq[] = { > > /* labels per RFC 9001, 5.1. 
Packet Protection Keys */ > > - { ngx_string("tls13 client in"), &client->secret, &iss }, > > - { ngx_string("tls13 quic key"), &client->key, &client->secret }, > > - { ngx_string("tls13 quic iv"), &client->iv, &client->secret }, > > - { ngx_string("tls13 quic hp"), &client->hp, &client->secret }, > > - { ngx_string("tls13 server in"), &server->secret, &iss }, > > - { ngx_string("tls13 quic key"), &server->key, &server->secret }, > > - { ngx_string("tls13 quic iv"), &server->iv, &server->secret }, > > - { ngx_string("tls13 quic hp"), &server->hp, &server->secret }, > > + ngx_quic_hkdf_set("tls13 client in", &client->secret, &iss), > > + ngx_quic_hkdf_set("tls13 quic key", &client->key, &client->secret), > > + ngx_quic_hkdf_set("tls13 quic iv", &client->iv, &client->secret), > > + ngx_quic_hkdf_set("tls13 quic hp", &client->hp, &client->secret), > > + ngx_quic_hkdf_set("tls13 server in", &server->secret, &iss), > > + ngx_quic_hkdf_set("tls13 quic key", &server->key, &server->secret), > > + ngx_quic_hkdf_set("tls13 quic iv", &server->iv, &server->secret), > > + ngx_quic_hkdf_set("tls13 quic hp", &server->hp, &server->secret), > > }; > > > > for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { > > - > > - if (ngx_quic_hkdf_expand(pool, digest, seq[i].key, &seq[i].label, > > - seq[i].prk->data, seq[i].prk->len) > > - != NGX_OK) > > - { > > + if (ngx_quic_hkdf_expand(&seq[i], digest, pool) != NGX_OK) { > > return NGX_ERROR; > > } > > } > > @@ -235,40 +273,39 @@ ngx_quic_keys_set_initial_secret(ngx_poo > > > > > > static ngx_int_t > > -ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, ngx_str_t *out, > > - ngx_str_t *label, const uint8_t *prk, size_t prk_len) > > +ngx_quic_hkdf_expand(ngx_quic_hkdf_t *h, const EVP_MD *digest, ngx_pool_t *pool) > > { > > - size_t info_len; > > uint8_t *p; > > - uint8_t info[20]; > > > > - if (out->data == NULL) { > > - out->data = ngx_pnalloc(pool, out->len); > > - if (out->data == NULL) { > > + if (h->out == NULL) { > > + 
h->out = ngx_pnalloc(pool, h->out_len); > > + if (h->out == NULL) { > > return NGX_ERROR; > > } > > } > > > > - info_len = 2 + 1 + label->len + 1; > > + h->info_len = 2 + 1 + h->label_len + 1; > > > > - info[0] = 0; > > - info[1] = out->len; > > - info[2] = label->len; > > - p = ngx_cpymem(&info[3], label->data, label->len); > > + h->info[0] = 0; > > + h->info[1] = h->out_len; > > + h->info[2] = h->label_len; > > Why? > info/info_len aren't used/useful outside of ngx_quic_hkdf_expand(), > they are barely one-time local storages to produce traffic secrets. sorry, this is a leftover. Indeed, there is no meaning in keeping this. I had some ideas how to initialize it on declaration and merge 'info' with 'label', but the result was too clumsy and unsafe to use. removed info from structure and restored local variable.. > > > + > > + p = ngx_cpymem(&h->info[3], h->label, h->label_len); > > *p = '\0'; > > > > - if (ngx_hkdf_expand(out->data, out->len, digest, > > - prk, prk_len, info, info_len) > > + if (ngx_hkdf_expand(h->out, h->out_len, digest, > > + h->prk, h->prk_len, h->info, h->info_len) > > != NGX_OK) > > { > > ngx_ssl_error(NGX_LOG_INFO, pool->log, 0, > > - "ngx_hkdf_expand(%V) failed", label); > > + "ngx_hkdf_expand(%*s) failed", h->label_len, h->label); > > return NGX_ERROR; > > } > > > > #ifdef NGX_QUIC_DEBUG_CRYPTO > > - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pool->log, 0, > > - "quic expand %V key len:%uz %xV", label, out->len, out); > > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pool->log, 0, > > + "quic expand \"%*s\" key len:%uz %*xs", > > + h->label_len, h->label, h->out_len, h->out_len, h->out); > > #endif > > > > return NGX_OK; > > @@ -652,6 +689,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ > > const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) > > { > > ngx_int_t key_len; > > + ngx_str_t secret_str; > > ngx_uint_t i; > > ngx_quic_secret_t *peer_secret; > > ngx_quic_ciphers_t ciphers; > > @@ -668,8 +706,9 @@ 
ngx_quic_keys_set_encryption_secret(ngx_ > > return NGX_ERROR; > > } > > > > - peer_secret->secret.data = ngx_pnalloc(pool, secret_len); > > - if (peer_secret->secret.data == NULL) { > > + if (sizeof(peer_secret->secret.data) < secret_len) { > > + ngx_log_error(NGX_LOG_ERR, pool->log, 0, > > + "unexpected secret len: %uz", secret_len); > > return NGX_ERROR; > > } > > This won't work with cipher suite hash algorithms used to produce > HKDF Hash.length (read: secret length) above SHA256_DIGEST_LENGTH, > such as TLS_AES_256_GCM_SHA384 or any future TLSv1.3 cipher suites > with SHA384 or above. The same for hardcoding NGX_QUIC_KEY_LEN. yes, exactly. currently boringssl just defines max size 48 to keep SHA384, and I think we can safely follow it. If anything will appear in future, we can always adjust size. I've renamed NGX_QUIC_KEY_LEN to NGX_QUIC_MAX_MD_SIZE and renamed structure holding buffer to ngx_quic_md_t, and use it for secret, key and hp, as it makes sens to use them uniformly. > The error, if ever leave it there, deserves rasing logging level > to "alert" as clearly a programmatic error. > agreed. This place is the worst in patch - we get size from the library and hope that our bufer is big enough, otherwise fail with a runtime error. On the other side, it is pretty clear that size here is just digest length, and we can expect reasonable numbers here. -------------- next part -------------- # HG changeset patch # User Vladimir Homutov # Date 1645524401 -10800 # Tue Feb 22 13:06:41 2022 +0300 # Branch quic # Node ID bb1717365759760bc8175b8d8084819a6ec35c26 # Parent 55b38514729b8f848709b31295e72d6886a7a433 QUIC: fixed-length buffers for secrets. 
diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -17,6 +17,9 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 +/* largest hash used in TLS is SHA-384 */ +#define NGX_QUIC_MAX_MD_SIZE 48 + #define NGX_AES_128_GCM_SHA256 0x1301 #define NGX_AES_256_GCM_SHA384 0x1302 #define NGX_CHACHA20_POLY1305_SHA256 0x1303 @@ -30,6 +33,18 @@ typedef struct { + size_t len; + u_char data[NGX_QUIC_MAX_MD_SIZE]; +} ngx_quic_md_t; + + +typedef struct { + size_t len; + u_char data[NGX_QUIC_IV_LEN]; +} ngx_quic_iv_t; + + +typedef struct { const ngx_quic_cipher_t *c; const EVP_CIPHER *hp; const EVP_MD *d; @@ -37,10 +52,10 @@ typedef struct { typedef struct ngx_quic_secret_s { - ngx_str_t secret; - ngx_str_t key; - ngx_str_t iv; - ngx_str_t hp; + ngx_quic_md_t secret; + ngx_quic_md_t key; + ngx_quic_iv_t iv; + ngx_quic_md_t hp; } ngx_quic_secret_t; @@ -57,6 +72,25 @@ struct ngx_quic_keys_s { }; +typedef struct { + size_t out_len; + u_char *out; + + size_t prk_len; + const uint8_t *prk; + + size_t label_len; + const u_char *label; +} ngx_quic_hkdf_t; + +#define ngx_quic_hkdf_set(label, out, prk) \ + { \ + (out)->len, (out)->data, \ + (prk)->len, (prk)->data, \ + (sizeof(label) - 1), (u_char *)(label), \ + } + + static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, const EVP_MD *digest, const u_char *prk, size_t prk_len, const u_char *info, size_t info_len); @@ -78,8 +112,8 @@ static ngx_int_t ngx_quic_tls_seal(const ngx_str_t *ad, ngx_log_t *log); static ngx_int_t ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); -static ngx_int_t ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, - ngx_str_t *out, ngx_str_t *label, const uint8_t *prk, size_t prk_len); +static ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, + const EVP_MD *digest, ngx_pool_t *pool); static ngx_int_t 
ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res); @@ -204,28 +238,20 @@ ngx_quic_keys_set_initial_secret(ngx_poo client->iv.len = NGX_QUIC_IV_LEN; server->iv.len = NGX_QUIC_IV_LEN; - struct { - ngx_str_t label; - ngx_str_t *key; - ngx_str_t *prk; - } seq[] = { + ngx_quic_hkdf_t seq[] = { /* labels per RFC 9001, 5.1. Packet Protection Keys */ - { ngx_string("tls13 client in"), &client->secret, &iss }, - { ngx_string("tls13 quic key"), &client->key, &client->secret }, - { ngx_string("tls13 quic iv"), &client->iv, &client->secret }, - { ngx_string("tls13 quic hp"), &client->hp, &client->secret }, - { ngx_string("tls13 server in"), &server->secret, &iss }, - { ngx_string("tls13 quic key"), &server->key, &server->secret }, - { ngx_string("tls13 quic iv"), &server->iv, &server->secret }, - { ngx_string("tls13 quic hp"), &server->hp, &server->secret }, + ngx_quic_hkdf_set("tls13 client in", &client->secret, &iss), + ngx_quic_hkdf_set("tls13 quic key", &client->key, &client->secret), + ngx_quic_hkdf_set("tls13 quic iv", &client->iv, &client->secret), + ngx_quic_hkdf_set("tls13 quic hp", &client->hp, &client->secret), + ngx_quic_hkdf_set("tls13 server in", &server->secret, &iss), + ngx_quic_hkdf_set("tls13 quic key", &server->key, &server->secret), + ngx_quic_hkdf_set("tls13 quic iv", &server->iv, &server->secret), + ngx_quic_hkdf_set("tls13 quic hp", &server->hp, &server->secret), }; for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { - - if (ngx_quic_hkdf_expand(pool, digest, seq[i].key, &seq[i].label, - seq[i].prk->data, seq[i].prk->len) - != NGX_OK) - { + if (ngx_quic_hkdf_expand(&seq[i], digest, pool) != NGX_OK) { return NGX_ERROR; } } @@ -235,40 +261,41 @@ ngx_quic_keys_set_initial_secret(ngx_poo static ngx_int_t -ngx_quic_hkdf_expand(ngx_pool_t *pool, const EVP_MD *digest, ngx_str_t *out, - ngx_str_t *label, const uint8_t *prk, size_t prk_len) +ngx_quic_hkdf_expand(ngx_quic_hkdf_t *h, const EVP_MD *digest, ngx_pool_t *pool) { size_t info_len; uint8_t 
*p; uint8_t info[20]; - if (out->data == NULL) { - out->data = ngx_pnalloc(pool, out->len); - if (out->data == NULL) { + if (h->out == NULL) { + h->out = ngx_pnalloc(pool, h->out_len); + if (h->out == NULL) { return NGX_ERROR; } } - info_len = 2 + 1 + label->len + 1; + info_len = 2 + 1 + h->label_len + 1; info[0] = 0; - info[1] = out->len; - info[2] = label->len; - p = ngx_cpymem(&info[3], label->data, label->len); + info[1] = h->out_len; + info[2] = h->label_len; + + p = ngx_cpymem(&info[3], h->label, h->label_len); *p = '\0'; - if (ngx_hkdf_expand(out->data, out->len, digest, - prk, prk_len, info, info_len) + if (ngx_hkdf_expand(h->out, h->out_len, digest, + h->prk, h->prk_len, info, info_len) != NGX_OK) { ngx_ssl_error(NGX_LOG_INFO, pool->log, 0, - "ngx_hkdf_expand(%V) failed", label); + "ngx_hkdf_expand(%*s) failed", h->label_len, h->label); return NGX_ERROR; } #ifdef NGX_QUIC_DEBUG_CRYPTO - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pool->log, 0, - "quic expand %V key len:%uz %xV", label, out->len, out); + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, pool->log, 0, + "quic expand \"%*s\" key len:%uz %*xs", + h->label_len, h->label, h->out_len, h->out_len, h->out); #endif return NGX_OK; @@ -652,6 +679,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) { ngx_int_t key_len; + ngx_str_t secret_str; ngx_uint_t i; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -668,8 +696,9 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } - peer_secret->secret.data = ngx_pnalloc(pool, secret_len); - if (peer_secret->secret.data == NULL) { + if (sizeof(peer_secret->secret.data) < secret_len) { + ngx_log_error(NGX_LOG_ALERT, pool->log, 0, + "unexpected secret len: %uz", secret_len); return NGX_ERROR; } @@ -680,22 +709,17 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret->iv.len = NGX_QUIC_IV_LEN; peer_secret->hp.len = key_len; - struct { - ngx_str_t label; - ngx_str_t *key; - const uint8_t *secret; - } 
seq[] = { - { ngx_string("tls13 quic key"), &peer_secret->key, secret }, - { ngx_string("tls13 quic iv"), &peer_secret->iv, secret }, - { ngx_string("tls13 quic hp"), &peer_secret->hp, secret }, + secret_str.len = secret_len; + secret_str.data = (u_char *) secret; + + ngx_quic_hkdf_t seq[] = { + ngx_quic_hkdf_set("tls13 quic key", &peer_secret->key, &secret_str), + ngx_quic_hkdf_set("tls13 quic iv", &peer_secret->iv, &secret_str), + ngx_quic_hkdf_set("tls13 quic hp", &peer_secret->hp, &secret_str), }; for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { - - if (ngx_quic_hkdf_expand(pool, ciphers.d, seq[i].key, &seq[i].label, - seq[i].secret, secret_len) - != NGX_OK) - { + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, pool) != NGX_OK) { return NGX_ERROR; } } @@ -769,49 +793,23 @@ ngx_quic_keys_update(ngx_connection_t *c next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; - struct { - ngx_str_t label; - ngx_str_t *key; - ngx_str_t *secret; - } seq[] = { - { - ngx_string("tls13 quic ku"), - &next->client.secret, - ¤t->client.secret, - }, - { - ngx_string("tls13 quic key"), - &next->client.key, - &next->client.secret, - }, - { - ngx_string("tls13 quic iv"), - &next->client.iv, - &next->client.secret, - }, - { - ngx_string("tls13 quic ku"), - &next->server.secret, - ¤t->server.secret, - }, - { - ngx_string("tls13 quic key"), - &next->server.key, - &next->server.secret, - }, - { - ngx_string("tls13 quic iv"), - &next->server.iv, - &next->server.secret, - }, + ngx_quic_hkdf_t seq[] = { + ngx_quic_hkdf_set("tls13 quic ku", + &next->client.secret, ¤t->client.secret), + ngx_quic_hkdf_set("tls13 quic key", + &next->client.key, &next->client.secret), + ngx_quic_hkdf_set("tls13 quic iv", + &next->client.iv, &next->client.secret), + ngx_quic_hkdf_set("tls13 quic ku", + &next->server.secret, ¤t->server.secret), + ngx_quic_hkdf_set("tls13 quic key", + &next->server.key, &next->server.secret), + ngx_quic_hkdf_set("tls13 quic iv", + &next->server.iv, 
&next->server.secret), }; for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { - - if (ngx_quic_hkdf_expand(c->pool, ciphers.d, seq[i].key, &seq[i].label, - seq[i].secret->data, seq[i].secret->len) - != NGX_OK) - { + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, c->pool) != NGX_OK) { return NGX_ERROR; } } @@ -909,7 +907,7 @@ ngx_quic_create_retry_packet(ngx_quic_he } secret.key.len = sizeof(key); - secret.key.data = key; + ngx_memcpy(secret.key.data, key, sizeof(key)); secret.iv.len = NGX_QUIC_IV_LEN; if (ngx_quic_tls_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, pkt->log) From ru at nginx.com Wed Feb 23 04:09:34 2022 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 23 Feb 2022 07:09:34 +0300 Subject: [PATCH] Add ipv4=off option in resolver like ipv6=off (ticket #1330) In-Reply-To: <20220216123055.GA18027@lo0.su> References: <20220216123055.GA18027@lo0.su> Message-ID: <20220223040934.GD18027@lo0.su> On Wed, Feb 16, 2022 at 03:30:55PM +0300, Ruslan Ermilov wrote: > Hi Lukas, > > On Wed, Jan 19, 2022 at 07:47:44PM +0100, Lukas Lihotzki via nginx-devel wrote: > > # HG changeset patch > > # User Lukas Lihotzki > > # Date 1642618053 -3600 > > # Wed Jan 19 19:47:33 2022 +0100 > > # Node ID e9f06dc2d6a4a1aa61c15009b84ceedcaf5983b2 > > # Parent aeab41dfd2606dd36cabbf01f1472726e27e8aea > > Add ipv4=off option in resolver like ipv6=off (ticket #1330). > > > > IPv6-only hosts (ticket #1330) and upstreams with IPv6 bind address > > (ticket #1535) need to disable resolving to IPv4 addresses. > > > > Ticket #1330 mentions ipv4=off is the proper fix. > > There's a number of problems in your patch. Please try this > one instead: > > # HG changeset patch > # User Ruslan Ermilov > # Date 1644873563 -10800 > # Tue Feb 15 00:19:23 2022 +0300 > # Node ID 5d2cb60a78dd32a10a0010ccff39974fd7605867 > # Parent 1add55d236522616ce34ffaa4dc697a76d3d41a4 > The "ipv4=" parameter of the "resolver" directive (ticket #2196). 
> > When set to "off", only IPv6 addresses will be resolved, and no > A queries are ever sent. > > diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c > --- a/src/core/ngx_resolver.c > +++ b/src/core/ngx_resolver.c > @@ -157,6 +157,8 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ > cln->handler = ngx_resolver_cleanup; > cln->data = r; > > + r->ipv4 = 1; > + > ngx_rbtree_init(&r->name_rbtree, &r->name_sentinel, > ngx_resolver_rbtree_insert_value); > > @@ -225,6 +227,23 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ > } > > #if (NGX_HAVE_INET6) > + if (ngx_strncmp(names[i].data, "ipv4=", 5) == 0) { > + > + if (ngx_strcmp(&names[i].data[5], "on") == 0) { > + r->ipv4 = 1; > + > + } else if (ngx_strcmp(&names[i].data[5], "off") == 0) { > + r->ipv4 = 0; > + > + } else { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "invalid parameter: %V", &names[i]); > + return NULL; > + } > + > + continue; > + } > + > if (ngx_strncmp(names[i].data, "ipv6=", 5) == 0) { > > if (ngx_strcmp(&names[i].data[5], "on") == 0) { Addon to the patch: diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -229,10 +229,12 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ #if (NGX_HAVE_INET6) if (ngx_strncmp(names[i].data, "ipv4=", 5) == 0) { - if (ngx_strcmp(&names[i].data[5], "on") == 0) { + if (ngx_strcasecmp(&names[i].data[5], (u_char *) "on") == 0) { r->ipv4 = 1; - } else if (ngx_strcmp(&names[i].data[5], "off") == 0) { + } else if (ngx_strcasecmp(&names[i].data[5], (u_char *) "off") + == 0) + { r->ipv4 = 0; } else { @@ -246,10 +248,12 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ if (ngx_strncmp(names[i].data, "ipv6=", 5) == 0) { - if (ngx_strcmp(&names[i].data[5], "on") == 0) { + if (ngx_strcasecmp(&names[i].data[5], (u_char *) "on") == 0) { r->ipv6 = 1; - } else if (ngx_strcmp(&names[i].data[5], "off") == 0) { + } else if (ngx_strcasecmp(&names[i].data[5], (u_char *) "off") + == 0) + { r->ipv6 = 0; } else { And a full 
updated patch: # HG changeset patch # User Ruslan Ermilov # Date 1645589317 -10800 # Wed Feb 23 07:08:37 2022 +0300 # Node ID 1c19779448db2309d607c74e2628ff98f84569ff # Parent 1add55d236522616ce34ffaa4dc697a76d3d41a4 The "ipv4=" parameter of the "resolver" directive (ticket #2196). When set to "off", only IPv6 addresses will be resolved, and no A queries are ever sent. diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -157,6 +157,8 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ cln->handler = ngx_resolver_cleanup; cln->data = r; + r->ipv4 = 1; + ngx_rbtree_init(&r->name_rbtree, &r->name_sentinel, ngx_resolver_rbtree_insert_value); @@ -225,12 +227,33 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } #if (NGX_HAVE_INET6) + if (ngx_strncmp(names[i].data, "ipv4=", 5) == 0) { + + if (ngx_strcasecmp(&names[i].data[5], (u_char *) "on") == 0) { + r->ipv4 = 1; + + } else if (ngx_strcasecmp(&names[i].data[5], (u_char *) "off") + == 0) + { + r->ipv4 = 0; + + } else { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter: %V", &names[i]); + return NULL; + } + + continue; + } + if (ngx_strncmp(names[i].data, "ipv6=", 5) == 0) { - if (ngx_strcmp(&names[i].data[5], "on") == 0) { + if (ngx_strcasecmp(&names[i].data[5], (u_char *) "on") == 0) { r->ipv6 = 1; - } else if (ngx_strcmp(&names[i].data[5], "off") == 0) { + } else if (ngx_strcasecmp(&names[i].data[5], (u_char *) "off") + == 0) + { r->ipv6 = 0; } else { @@ -273,6 +296,14 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } } +#if (NGX_HAVE_INET6) + if (r->ipv4 + r->ipv6 == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "\"ipv4\" and \"ipv6\" cannot both be \"off\""); + return NULL; + } +#endif + if (n && r->connections.nelts == 0) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "no name servers defined"); return NULL; @@ -836,7 +867,7 @@ ngx_resolve_name_locked(ngx_resolver_t * r->last_connection = 0; } - rn->naddrs = (u_short) -1; + rn->naddrs = r->ipv4 
? (u_short) -1 : 0; rn->tcp = 0; #if (NGX_HAVE_INET6) rn->naddrs6 = r->ipv6 ? (u_short) -1 : 0; @@ -1263,7 +1294,7 @@ ngx_resolver_send_query(ngx_resolver_t * rec->log.action = "resolving"; } - if (rn->naddrs == (u_short) -1) { + if (rn->query && rn->naddrs == (u_short) -1) { rc = rn->tcp ? ngx_resolver_send_tcp_query(r, rec, rn->query, rn->qlen) : ngx_resolver_send_udp_query(r, rec, rn->query, rn->qlen); @@ -1764,10 +1795,13 @@ ngx_resolver_process_response(ngx_resolv q = ngx_queue_next(q)) { rn = ngx_queue_data(q, ngx_resolver_node_t, queue); - qident = (rn->query[0] << 8) + rn->query[1]; - - if (qident == ident) { - goto dns_error_name; + + if (rn->query) { + qident = (rn->query[0] << 8) + rn->query[1]; + + if (qident == ident) { + goto dns_error_name; + } } #if (NGX_HAVE_INET6) @@ -3644,7 +3678,7 @@ ngx_resolver_create_name_query(ngx_resol len = sizeof(ngx_resolver_hdr_t) + nlen + sizeof(ngx_resolver_qs_t); #if (NGX_HAVE_INET6) - p = ngx_resolver_alloc(r, r->ipv6 ? len * 2 : len); + p = ngx_resolver_alloc(r, len * (r->ipv4 + r->ipv6)); #else p = ngx_resolver_alloc(r, len); #endif @@ -3653,23 +3687,28 @@ ngx_resolver_create_name_query(ngx_resol } rn->qlen = (u_short) len; - rn->query = p; + + if (r->ipv4) { + rn->query = p; + } #if (NGX_HAVE_INET6) if (r->ipv6) { - rn->query6 = p + len; + rn->query6 = r->ipv4 ? 
(p + len) : p; } #endif query = (ngx_resolver_hdr_t *) p; - ident = ngx_random(); - - ngx_log_debug2(NGX_LOG_DEBUG_CORE, r->log, 0, - "resolve: \"%V\" A %i", name, ident & 0xffff); - - query->ident_hi = (u_char) ((ident >> 8) & 0xff); - query->ident_lo = (u_char) (ident & 0xff); + if (r->ipv4) { + ident = ngx_random(); + + ngx_log_debug2(NGX_LOG_DEBUG_CORE, r->log, 0, + "resolve: \"%V\" A %i", name, ident & 0xffff); + + query->ident_hi = (u_char) ((ident >> 8) & 0xff); + query->ident_lo = (u_char) (ident & 0xff); + } /* recursion query */ query->flags_hi = 1; query->flags_lo = 0; @@ -3730,7 +3769,9 @@ ngx_resolver_create_name_query(ngx_resol p = rn->query6; - ngx_memcpy(p, rn->query, rn->qlen); + if (r->ipv4) { + ngx_memcpy(p, rn->query, rn->qlen); + } query = (ngx_resolver_hdr_t *) p; diff --git a/src/core/ngx_resolver.h b/src/core/ngx_resolver.h --- a/src/core/ngx_resolver.h +++ b/src/core/ngx_resolver.h @@ -175,8 +175,10 @@ struct ngx_resolver_s { ngx_queue_t srv_expire_queue; ngx_queue_t addr_expire_queue; + unsigned ipv4:1; + #if (NGX_HAVE_INET6) - ngx_uint_t ipv6; /* unsigned ipv6:1; */ + unsigned ipv6:1; ngx_rbtree_t addr6_rbtree; ngx_rbtree_node_t addr6_sentinel; ngx_queue_t addr6_resend_queue; From ru at nginx.com Wed Feb 23 04:11:25 2022 From: ru at nginx.com (=?koi8-r?b?UnVzbGFuIEVybWlsb3Y=?=) Date: Wed, 23 Feb 2022 07:11:25 +0300 Subject: [PATCH] The "sort=" parameter of the "resolver" directive In-Reply-To: <20220223040934.GD18027@lo0.su> References: <20220223040934.GD18027@lo0.su> Message-ID: <8db4bbd67840e8bebb23.1645589485@1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa> src/core/ngx_resolver.c | 38 +++++++++++++++++++++++++++++++++++++- src/core/ngx_resolver.h | 5 +++++ 2 files changed, 42 insertions(+), 1 deletions(-) # HG changeset patch # User Ruslan Ermilov # Date 1645589387 -10800 # Wed Feb 23 07:09:47 2022 +0300 # Node ID 8db4bbd67840e8bebb23f9c6d10c0f633552e616 # Parent 1c19779448db2309d607c74e2628ff98f84569ff The 
"sort=" parameter of the "resolver" directive. diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -266,6 +266,27 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } #endif + if (ngx_strncmp(names[i].data, "sort=", 5) == 0) { + + if (ngx_strcasecmp(&names[i].data[5], (u_char *) "ipv4") == 0) { + r->sort = NGX_RESOLVE_A_FIRST; + +#if (NGX_HAVE_INET6) + } else if (ngx_strcasecmp(&names[i].data[5], (u_char *) "ipv6") + == 0) + { + r->sort = NGX_RESOLVE_AAAA_FIRST; +#endif + + } else { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter: %V", &names[i]); + return NULL; + } + + continue; + } + ngx_memzero(&u, sizeof(ngx_url_t)); u.url = names[i]; @@ -4253,7 +4274,22 @@ ngx_resolver_export(ngx_resolver_t *r, n } i = 0; - d = rotate ? ngx_random() % n : 0; + + if (r->sort == NGX_RESOLVE_A_FIRST) { + d = 0; + +#if (NGX_HAVE_INET6) + } else if (r->sort == NGX_RESOLVE_AAAA_FIRST) { + d = rn->naddrs6; + + if (d == n) { + d = 0; + } +#endif + + } else { + d = rotate ? ngx_random() % n : 0; + } if (rn->naddrs) { j = rotate ? ngx_random() % rn->naddrs : 0; diff --git a/src/core/ngx_resolver.h b/src/core/ngx_resolver.h --- a/src/core/ngx_resolver.h +++ b/src/core/ngx_resolver.h @@ -36,6 +36,9 @@ #define NGX_RESOLVER_MAX_RECURSION 50 +#define NGX_RESOLVE_A_FIRST 1 +#define NGX_RESOLVE_AAAA_FIRST 2 + typedef struct ngx_resolver_s ngx_resolver_t; @@ -185,6 +188,8 @@ struct ngx_resolver_s { ngx_queue_t addr6_expire_queue; #endif + ngx_uint_t sort; + time_t resend_timeout; time_t tcp_timeout; time_t expire; From ranier.vf at gmail.com Thu Feb 24 12:51:42 2022 From: ranier.vf at gmail.com (Ranier Vilela) Date: Thu, 24 Feb 2022 09:51:42 -0300 Subject: Unsubscribe from Nginx Project Message-ID: Hi, Please unsubscribe me from the Nginx mail list. Ranier Vilela --Stop the War. -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From serg.brester at sebres.de Thu Feb 24 13:14:17 2022 From: serg.brester at sebres.de (Sergey Brester) Date: Thu, 24 Feb 2022 14:14:17 +0100 Subject: Unsubscribe from Nginx Project In-Reply-To: References: Message-ID: To unsubscribe send an email to nginx-devel-leave at nginx.org Sergey Brester --Stop the pathetic Hypocrisy 24.02.2022 13:51, Ranier Vilela wrote: > Hi, > > Please unsubscribe me from the Nginx mail list. > > Ranier Vilela > --Stop the War. > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org -------------- next part -------------- An HTML attachment was scrubbed... URL: From ranier.vf at gmail.com Thu Feb 24 13:18:43 2022 From: ranier.vf at gmail.com (Ranier Vilela) Date: Thu, 24 Feb 2022 10:18:43 -0300 Subject: Unsubscribe from Nginx Project In-Reply-To: References: Message-ID: Em qui., 24 de fev. de 2022 às 10:14, Sergey Brester escreveu: > > To unsubscribe send an email to nginx-devel-leave at nginx.org > > Done. regards, Ranier Vilela --Stop the War. -------------- next part -------------- An HTML attachment was scrubbed... URL: From Vinayaka.Holla at alliancebernstein.com Mon Feb 28 13:24:19 2022 From: Vinayaka.Holla at alliancebernstein.com (Holla, Vinayaka) Date: Mon, 28 Feb 2022 13:24:19 +0000 Subject: capture ngnix user login information Message-ID: Please let me know how to capture user (client) login details Vinayaka Holla VP/Data Engineer Data Science t: +1 212-823-6936 m: +1 609-819-6041 e: Vinayaka.Holla at alliancebernstein.com [AB_Logo_86px] ............................................................................ For further important information about AllianceBernstein please click here http://www.alliancebernstein.com/disclaimer/email/disclaimer.html -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... 
Name: image001.png Type: image/png Size: 881 bytes Desc: image001.png URL: From vasiliy.soshnikov at gmail.com Mon Feb 28 13:35:21 2022 From: vasiliy.soshnikov at gmail.com (=?UTF-8?B?VmFzaWxpeSBTb3Nobmlrb3Y=?=) Date: Mon, 28 Feb 2022 16:35:21 +0300 Subject: =?UTF-8?B?UmU6IGNhcHR1cmUgbmduaXggdXNlciBsb2dpbiBpbmZvcm1hdGlvbg==?= In-Reply-To: References: Message-ID: <1646055321.759966430@f22.my.com> Hello, could you please provide more details? Ex: get some data from the input form and put to the log, or something like this. Отправлено из myMail для iOS понедельник, 28 февраля 2022 г., 16:25 +0300 от Vinayaka.Holla at alliancebernstein.com : >Please let me know how to capture user (client) login details >  >Vinayaka Holla >VP/Data Engineer >Data Science >t: +1 212-823-6936 >m: +1 609-819-6041 >e: Vinayaka.Holla at alliancebernstein.com >  >  >............................................................................ >For further important information about AllianceBernstein please click here >http://www.alliancebernstein.com/disclaimer/email/disclaimer.html >_______________________________________________ >nginx-devel mailing list -- nginx-devel at nginx.org >To unsubscribe send an email to nginx-devel-leave at nginx.org -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: image001.png Type: image/png Size: 881 bytes Desc: not available URL: From Vinayaka.Holla at alliancebernstein.com Mon Feb 28 13:53:53 2022 From: Vinayaka.Holla at alliancebernstein.com (Holla, Vinayaka) Date: Mon, 28 Feb 2022 13:53:53 +0000 Subject: capture ngnix user login information Message-ID: We have a simple app, with ngnix/flask and Plotly, 1. when user open the app from browser, we need to capture which user is logged in so we can update database table. 2. like in IIS we use Request.LogonUserIdentity.Name 3.
current ngnix is running with no authentication configuration Thank you Vinayaka Holla VP/Data Engineer Data Science t: +1 212-823-6936 m: +1 609-819-6041 e: Vinayaka.Holla at alliancebernstein.com -----Original Message----- From: nginx-devel-request at nginx.org Sent: Monday, February 28, 2022 8:36 AM To: nginx-devel at nginx.org Subject: nginx-devel Digest, Vol 148, Issue 21 External Email. Use caution when clicking links or opening file attachments. Send nginx-devel mailing list submissions to nginx-devel at nginx.org To subscribe or unsubscribe via email, send a message with subject or body 'help' to nginx-devel-request at nginx.org You can reach the person managing the list at nginx-devel-owner at nginx.org When replying, please edit your Subject line so it is more specific than "Re: Contents of nginx-devel digest..." Today's Topics: 1. capture ngnix user login information (Holla, Vinayaka) 2. Re: capture ngnix user login information (Vasiliy Soshnikov) ---------------------------------------------------------------------- Message: 1 Date: Mon, 28 Feb 2022 13:24:19 +0000 From: "Holla, Vinayaka" Subject: capture ngnix user login information To: "nginx-devel at nginx.org" Message-ID: Content-Type: multipart/related; boundary="_004_CH0PR20MB389877 7AD725EF9C32F8306980019CH0PR20MB3898namp_"; type="multipart/alternative" Please let me know how to capture user (client) login details Vinayaka Holla VP/Data Engineer Data Science t: +1 212-823-6936 m: +1 609-819-6041 e: Vinayaka.Holla at alliancebernstein.com [AB_Logo_86px] ............................................................................ For further important information about AllianceBernstein please click here http://www.alliancebernstein.com/disclaimer/email/disclaimer.html -------------- next part -------------- A message part incompatible with plain text digests has been removed ... 
Name: not available Type: text/html Size: 3590 bytes Desc: not available -------------- next part -------------- A message part incompatible with plain text digests has been removed ... Name: image001.png Type: image/png Size: 881 bytes Desc: image001.png ------------------------------ Message: 2 Date: Mon, 28 Feb 2022 16:35:21 +0300 From: Vasiliy Soshnikov Subject: Re: capture ngnix user login information To: nginx-devel at nginx.org Message-ID: <1646055321.759966430 at f22.my.com> Content-Type: multipart/alternative; boundary="--ALT--611fd3d7ad1a4db0bbe658353383472d1646055321" Hello, please could you provide you provide more details? Ex: get some data from the input form and put to the log, or something like this. Отправлено из myMail для iOS понедельник, 28 февраля 2022 г., 16:25 +0300 от Vinayaka.Holla at alliancebernstein.com : >Please let me know how to capture user (client) login details >  >Vinayaka Holla >VP/Data Engineer >Data Science >t: +1 212-823-6936 >m: +1 609-819-6041 >e: Vinayaka.Holla at alliancebernstein.com >  >  >............................................................................ >For further important information about AllianceBernstein please click >here http://www.alliancebernstein.com/disclaimer/email/disclaimer.html >_______________________________________________ >nginx-devel mailing list -- nginx-devel at nginx.org To unsubscribe send >an email to nginx-devel-leave at nginx.org -------------- next part -------------- A message part incompatible with plain text digests has been removed ... Name: not available Type: text/html Size: 3954 bytes Desc: not available -------------- next part -------------- A message part incompatible with plain text digests has been removed ... 
Name: =?UTF-8?B?aW1hZ2UwMDEucG5n?= Type: image/png Size: 881 bytes Desc: not available ------------------------------ Subject: Digest Footer _______________________________________________ nginx-devel mailing list -- nginx-devel at nginx.org To unsubscribe send an email to nginx-devel-leave at nginx.org ------------------------------ End of nginx-devel Digest, Vol 148, Issue 21 ********************************************