From benjamin.p.kallus.gr at dartmouth.edu  Wed Jan  3 23:57:57 2024
From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus)
Date: Wed, 3 Jan 2024 23:57:57 +0000
Subject: Core: Avoid memcpy from NULL
In-Reply-To:
References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de>
Message-ID:

> Still, general style guidelines suggest that the code shouldn't
> be written this way, and the only reason for j++ in the line in
> question is that it mimics corresponding IPv4 code.
>
> It's not "just happens".

The point I'm trying to make is that ensuring correctness with
function-like macros is difficult, both because of operator precedence
and argument reevaluation. Expecting contributors to read the
definitions of every macro they use becomes more and more cumbersome
as the codebase expands, especially when some symbols are variably
macros or functions depending on the state of (even infrequently-used)
compile-time constants.

All that said, upon further reflection, I think the UB issue is best
solved outside of ngx_memcpy, where the overhead of an extra check may
have a performance impact. The following patch is sufficient to
silence UBSan in my configuration:

# HG changeset patch
# User Ben Kallus
# Date 1704322684 18000
#      Wed Jan 03 17:58:04 2024 -0500
# Node ID 04eb4b1622d1a488f14bb6d5af25e422ff23d82d
# Parent  ee40e2b1d0833b46128a357fbc84c6e23be9be07
Add check to ngx_pstrdup to prevent 0-length memcpy.

diff -r ee40e2b1d083 -r 04eb4b1622d1 src/core/ngx_string.c
--- a/src/core/ngx_string.c    Mon Dec 25 21:15:48 2023 +0400
+++ b/src/core/ngx_string.c    Wed Jan 03 17:58:04 2024 -0500
@@ -77,8 +77,8 @@
     u_char  *dst;

     dst = ngx_pnalloc(pool, src->len);
-    if (dst == NULL) {
-        return NULL;
+    if (dst == NULL || src->len == 0) {
+        return dst;
     }

     ngx_memcpy(dst, src->data, src->len);

From benjamin.p.kallus.gr at dartmouth.edu  Thu Jan  4 00:55:08 2024
From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus)
Date: Thu, 4 Jan 2024 00:55:08 +0000
Subject: [PATCH] Satisfy UBSan in njs
Message-ID:

When I run my nginx+njs application with UBSan enabled, I encounter a
few instances of undefined behavior in njs:

1. A memcpy from NULL
2. A couple of offsets applied to NULL
3. A u32 assigned to NaN
4. A u32 assigned to Inf

This patch adds checks to prevent these undefined operations. With it,
my application no longer has any UBSan alerts.
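For reference, a minimal standalone sketch (illustration only, not part
of the patch) that reproduces all four classes of alerts when compiled
with -fsanitize=undefined:

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    int
    main(void)
    {
        unsigned char  *p = NULL;
        uint32_t        u32;

        memcpy(p, p, 0);            /* memcpy from NULL: UB even for 0 bytes */
        p += 0;                     /* offset applied to NULL: also UB */
        u32 = (uint32_t) NAN;       /* u32 assigned to NaN: UB conversion */
        u32 = (uint32_t) INFINITY;  /* u32 assigned to Inf: UB conversion */
        (void) u32;

        return 0;
    }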
# HG changeset patch
# User Ben Kallus
# Date 1704329280 18000
#      Wed Jan 03 19:48:00 2024 -0500
# Node ID 85d5846984fc2731ad74f91f21c74be67d6974a9
# Parent  4a15613f4e8bb4a8349ee1cefbae07585da4cbc6
Prevent undefined operations on NULL, INF, and NAN

diff -r 4a15613f4e8b -r 85d5846984fc nginx/ngx_http_js_module.c
--- a/nginx/ngx_http_js_module.c    Tue Dec 19 12:37:05 2023 -0800
+++ b/nginx/ngx_http_js_module.c    Wed Jan 03 19:48:00 2024 -0500
@@ -2717,7 +2717,9 @@
     for ( /* void */ ; cl; cl = cl->next) {
         buf = cl->buf;

-        p = ngx_cpymem(p, buf->pos, buf->last - buf->pos);
+        if (buf->last - buf->pos > 0) {
+            p = ngx_cpymem(p, buf->pos, buf->last - buf->pos);
+        }
     }

 done:

diff -r 4a15613f4e8b -r 85d5846984fc src/njs_extern.c
--- a/src/njs_extern.c    Tue Dec 19 12:37:05 2023 -0800
+++ b/src/njs_extern.c    Wed Jan 03 19:48:00 2024 -0500
@@ -38,7 +38,10 @@
     lhq.proto = &njs_object_hash_proto;
     lhq.pool = vm->mem_pool;

-    end = external + n;
+    end = external;
+    if (n > 0) {
+        end += n;
+    }

     while (external < end) {

diff -r 4a15613f4e8b -r 85d5846984fc src/njs_number.h
--- a/src/njs_number.h    Tue Dec 19 12:37:05 2023 -0800
+++ b/src/njs_number.h    Wed Jan 03 19:48:00 2024 -0500
@@ -41,6 +41,10 @@
 {
     uint32_t  u32;

+    if (isnan(num) || isinf(num)) {
+        return 0;
+    }
+
     u32 = num;

     return (u32 == num && u32 != 0xffffffff);

diff -r 4a15613f4e8b -r 85d5846984fc src/njs_object.c
--- a/src/njs_object.c    Tue Dec 19 12:37:05 2023 -0800
+++ b/src/njs_object.c    Wed Jan 03 19:48:00 2024 -0500
@@ -598,7 +598,10 @@
     start = array->start;
     p = start;

-    end = p + array->length;
+    end = p;
+    if (array->length > 0) {
+        end += array->length;
+    }

     switch (kind) {
     case NJS_ENUM_KEYS:

From arut at nginx.com  Thu Jan  4 16:03:27 2024
From: arut at nginx.com (Roman Arutyunyan)
Date: Thu, 4 Jan 2024 20:03:27 +0400
Subject: [PATCH 1 of 3] Stream: socket peek in preread phase
In-Reply-To: <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV>
References: <966331bb4936888ef2f0.1699610839@arut-laptop>
 <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX>
 <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV>
Message-ID: <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX>

Hi,

On Wed, Dec 27, 2023 at 06:34:58PM +0400, Sergey Kandaurov wrote:
> On Wed, Dec 13, 2023 at 06:06:59PM +0400, Roman Arutyunyan wrote:
>
> > # HG changeset patch
> > # User Roman Arutyunyan
> > # Date 1702476295 -14400
> > #      Wed Dec 13 18:04:55 2023 +0400
> > # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e
> > # Parent  6c8595b77e667bd58fd28186939ed820f2e55e0e
> > Stream: socket peek in preread phase.
> >
> > Previously, the preread buffer was always read out from the socket,
> > which made it impossible to terminate SSL on the connection without
> > introducing additional SSL BIOs.  The following patches will rely
> > on this.
> >
> > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data
> > in the socket.  It's called if SSL is not already terminated and if
> > an edge-triggered event method is used.  For epoll, EPOLLRDHUP
> > support is also required.
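For reference, the peek itself is plain recv(2) with MSG_PEEK; a
minimal sketch (illustration only, assuming a connected socket fd) of
why the bytes stay available for a later SSL handshake on the same
descriptor:

    #include <sys/types.h>
    #include <sys/socket.h>

    /* copies up to "size" bytes out of the kernel socket buffer
     * without consuming them; a later recv() or SSL_accept() on
     * the same fd still sees the same bytes */
    static ssize_t
    preread_peek(int fd, unsigned char *buf, size_t size)
    {
        return recv(fd, buf, size, MSG_PEEK);
    }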
> > > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > > --- a/src/stream/ngx_stream_core_module.c > > +++ b/src/stream/ngx_stream_core_module.c > > @@ -10,6 +10,10 @@ > > #include > > > > > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > > @@ -203,8 +207,6 @@ ngx_int_t > > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph) > > { > > - size_t size; > > - ssize_t n; > > ngx_int_t rc; > > ngx_connection_t *c; > > ngx_stream_core_srv_conf_t *cscf; > > @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > > > > if (c->read->timedout) { > > rc = NGX_STREAM_OK; > > + goto done; > > + } > > > > - } else if (c->read->timer_set) { > > - rc = NGX_AGAIN; > > + if (!c->read->timer_set) { > > + rc = ph->handler(s); > > > > - } else { > > - rc = ph->handler(s); > > + if (rc != NGX_AGAIN) { > > + goto done; > > + } > > } > > > > - while (rc == NGX_AGAIN) { > > - > > + if (c->buffer == NULL) { > > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > if (c->buffer == NULL) { > > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > - if (c->buffer == NULL) { > > - rc = NGX_ERROR; > > - break; > > - } > > + rc = NGX_ERROR; > > + goto done; > > } > > - > > - size = c->buffer->end - c->buffer->last; > > - > > - if (size == 0) { > > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > - rc = NGX_STREAM_BAD_REQUEST; > > - break; > > - } > > + } > > > > - if (c->read->eof) { > > - rc = NGX_STREAM_OK; > > - break; > > - } > > - > > - if (!c->read->ready) { > > - break; > > - } > > - > > - n = c->recv(c, c->buffer->last, size); > > + if (c->ssl == NULL > > + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > > + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > > +#if (NGX_HAVE_EPOLLRDHUP) > > + || ngx_use_epoll_rdhup > > +#endif > > BTW, c->ssl needs to be guarded under an appropriate macro test. > Probably, it makes sense to rewrite this in a more readable way. > For example: > > : peak = 0; > : > : #if (NGX_HAVE_KQUEUE) > : if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > : peak = 1; > : } > : #endif > : > : #if (NGX_HAVE_EPOLLRDHUP) > : if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > : peak = 1; > : } > : #endif > : > : #if (NGX_STREAM_SSL) > : if (c->ssl) { > : peak = 0; > : } > : #endif [..] I think it's still too complicated. 
I suggest a separate function:

diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c
--- a/src/stream/ngx_stream_core_module.c
+++ b/src/stream/ngx_stream_core_module.c
@@ -10,6 +10,7 @@
 #include


+static ngx_int_t ngx_stream_preread_can_peek(ngx_connection_t *c);
 static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s,
     ngx_stream_phase_handler_t *ph);
 static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s,
@@ -238,14 +239,7 @@
         }
     }

-    if (c->ssl == NULL
-        && (ngx_event_flags & NGX_USE_CLEAR_EVENT)
-        && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0
-#if (NGX_HAVE_EPOLLRDHUP)
-            || ngx_use_epoll_rdhup
-#endif
-           ))
-    {
+    if (ngx_stream_preread_can_peek(c)) {
         rc = ngx_stream_preread_peek(s, ph);

     } else {
@@ -298,6 +292,35 @@


 static ngx_int_t
+ngx_stream_preread_can_peek(ngx_connection_t *c)
+{
+#if (NGX_STREAM_SSL)
+    if (c->ssl) {
+        return 0;
+    }
+#endif
+
+    if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) {
+        return 0;
+    }
+
+#if (NGX_HAVE_KQUEUE)
+    if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
+        return 1;
+    }
+#endif
+
+#if (NGX_HAVE_EPOLLRDHUP)
+    if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) {
+        return 1;
+    }
+#endif
+
+    return 0;
+}
+
+
+static ngx_int_t
 ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph)
 {
     ssize_t    n;

--
Roman Arutyunyan

From mdounin at mdounin.ru  Thu Jan  4 16:11:56 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Thu, 4 Jan 2024 19:11:56 +0300
Subject: Core: Avoid memcpy from NULL
In-Reply-To:
References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de>
Message-ID:

Hello!

On Wed, Jan 03, 2024 at 11:57:57PM +0000, Ben Kallus wrote:

> > Still, general style guidelines suggest that the code shouldn't
> > be written this way, and the only reason for j++ in the line in
> > question is that it mimics corresponding IPv4 code.
> >
> > It's not "just happens".
>
> The point I'm trying to make is that ensuring correctness with
> function-like macros is difficult, both because of operator precedence
> and argument reevaluation. Expecting contributors to read the
> definitions of every macro they use becomes more and more cumbersome
> as the codebase expands, especially when some symbols are variably
> macros or functions depending on the state of (even infrequently-used)
> compile-time constants.

Sure, and hence the style.

> All that said, upon further reflection, I think the UB issue is best
> solved outside of ngx_memcpy, where the overhead of an extra check may
> have a performance impact. The following patch is sufficient to
> silence UBSan in my configuration:

I've already pointed you to Vladimir's patch, which contains at
least a dozen places where the same "UB issue" is reported when
running nginx tests with UBSan.  This demonstrates that your patch
is clearly insufficient.

Further, Vladimir's patch is clearly insufficient too, as shown
for another patch in the same patch series.

If we want to follow this approach, we need some way to trace
places where zero-length memcpy() can occur.  My best guess is
that the only way is to look through all ngx_cpymem() /
ngx_memcpy() / ngx_memmove() / ngx_movemem() calls, as nginx
routinely uses { 0, NULL } as an empty string.

Given that there are 600+ such calls in the codebase, and at least
some need a serious audit to find out if { 0, NULL } can appear in
the call, this is going to be a huge amount of work.
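For illustration, this is the pattern in question (a sketch, not taken
from any actual call site):

    ngx_str_t  s = ngx_null_string;    /* that is, { 0, NULL } */
    u_char     buf[16], *p = buf;

    /* ngx_cpymem(p, s.data, s.len) expands to
     * (u_char *) memcpy(p, NULL, 0) + 0, and UBSan reports the
     * NULL passed to memcpy() even though the length is zero */
    p = ngx_cpymem(p, s.data, s.len);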
And, given that the only expected effect is theoretical
correctness of the code, I doubt it's worth the effort, especially
given that the end result will likely reduce readability of the
code.

--
Maxim Dounin
http://mdounin.ru/

From jordanc.carter at outlook.com  Mon Jan  8 11:25:55 2024
From: jordanc.carter at outlook.com (J Carter)
Date: Mon, 8 Jan 2024 11:25:55 +0000
Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162)
In-Reply-To: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru>
References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru>
Message-ID:

Hello,

On Mon, 27 Nov 2023 05:50:27 +0300
Maxim Dounin wrote:

> # HG changeset patch
> # User Maxim Dounin
> # Date 1701050170 -10800
> #      Mon Nov 27 04:56:10 2023 +0300
> # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2
> # Parent  61d08e4cf97cc073200ec32fc6ada9a2d48ffe51
> AIO operations now add timers (ticket #2162).
>
> Each AIO (thread IO) operation being run is now accompanied with a
> 1-minute timer.  This timer prevents unexpected shutdown of the worker
> process while an AIO operation is running, and logs an alert if the
> operation is running for too long.

Shouldn't this timer's duration be set to match worker_shutdown_timeout's
duration rather than being hard-coded to 60s?

> This fixes "open socket left" alerts during worker processes shutdown
> due to pending AIO (or thread IO) operations while corresponding requests
> have no timers.  In particular, such errors were observed while reading
> cache headers (ticket #2162), and with worker_shutdown_timeout.

[...]

From jordanc.carter at outlook.com  Mon Jan  8 13:31:11 2024
From: jordanc.carter at outlook.com (J Carter)
Date: Mon, 8 Jan 2024 13:31:11 +0000
Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162)
In-Reply-To:
References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru>
Message-ID:

On Mon, 8 Jan 2024 11:25:55 +0000
J Carter wrote:

> Hello,
>
> On Mon, 27 Nov 2023 05:50:27 +0300
> Maxim Dounin wrote:
>
> > # HG changeset patch
> > # User Maxim Dounin
> > # Date 1701050170 -10800
> > #      Mon Nov 27 04:56:10 2023 +0300
> > # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2
> > # Parent  61d08e4cf97cc073200ec32fc6ada9a2d48ffe51
> > AIO operations now add timers (ticket #2162).
> >
> > Each AIO (thread IO) operation being run is now accompanied with a
> > 1-minute timer.  This timer prevents unexpected shutdown of the worker
> > process while an AIO operation is running, and logs an alert if the
> > operation is running for too long.
>
> Shouldn't this timer's duration be set to match worker_shutdown_timeout's
> duration rather than being hard-coded to 60s?

Ah, never mind, I understand.

These timers will either expire from passing the 60s set duration, or
will expire as worker_shutdown_timeout itself expires, kills the
connection and times out associated timers (including the AIO timers).

Setting it to worker_shutdown_timeout's duration would be pointless
(an 'infinite' timer would give the same result).

So the only situation in which a different value for these AIO
timers would make sense is if these AIO operations are expected to
take longer than 60s, but less than worker_shutdown_timeout (in cases
where it has been increased from its own default of 60s).

In that case the AIO operation's timeout would have to be one
(or more) of its own directives, with a value less than
worker_shutdown_timeout.

(Doesn't seem like it's worth it from the ticket discussion, sorry
for the noise.)
> > This fixes "open socket left" alerts during worker processes shutdown > > due to pending AIO (or thread IO) operations while corresponding requests > > have no timers. In particular, such errors were observed while reading > > cache headers (ticket #2162), and with worker_shutdown_timeout. > > [...] From arut at nginx.com Mon Jan 8 13:31:15 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 8 Jan 2024 17:31:15 +0400 Subject: [PATCH 1 of 6] Stream: using ngx_stream_ssl_srv_conf_t *sscf naming convention In-Reply-To: References: Message-ID: <20240108133115.lrgpw7blvqokciva@N00W24XTQX> Hi, On Fri, Dec 15, 2023 at 07:37:44PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1702646778 -14400 > # Fri Dec 15 17:26:18 2023 +0400 > # Node ID cb377d36446e1ce22b71848a4a138564b2e38719 > # Parent 763803589a36e3c67cbe39dd324b4e91fe57ecb7 > Stream: using ngx_stream_ssl_srv_conf_t *sscf naming convention. > > Originally, the stream module was developed based on the mail module, > following the existing style. Then it was diverged to closely follow > the http module development. This change updates style to use sscf > naming convention troughout the stream module, which matches the http > module code style. No functional changes. > > diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c > --- a/src/stream/ngx_stream_ssl_module.c > +++ b/src/stream/ngx_stream_ssl_module.c > @@ -40,12 +40,12 @@ static ngx_int_t ngx_stream_ssl_variable > ngx_stream_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_stream_ssl_add_variables(ngx_conf_t *cf); > -static void *ngx_stream_ssl_create_conf(ngx_conf_t *cf); > -static char *ngx_stream_ssl_merge_conf(ngx_conf_t *cf, void *parent, > +static void *ngx_stream_ssl_create_srv_conf(ngx_conf_t *cf); > +static char *ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, > void *child); > > static ngx_int_t ngx_stream_ssl_compile_certificates(ngx_conf_t *cf, > - ngx_stream_ssl_conf_t *conf); > + ngx_stream_ssl_srv_conf_t *conf); > > static char *ngx_stream_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, > void *conf); > @@ -90,21 +90,21 @@ static ngx_command_t ngx_stream_ssl_com > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_msec_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, handshake_timeout), > + offsetof(ngx_stream_ssl_srv_conf_t, handshake_timeout), > NULL }, > > { ngx_string("ssl_certificate"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_array_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, certificates), > + offsetof(ngx_stream_ssl_srv_conf_t, certificates), > NULL }, > > { ngx_string("ssl_certificate_key"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_array_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, certificate_keys), > + offsetof(ngx_stream_ssl_srv_conf_t, certificate_keys), > NULL }, > > { ngx_string("ssl_password_file"), > @@ -118,63 +118,63 @@ static ngx_command_t ngx_stream_ssl_com > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, dhparam), > + offsetof(ngx_stream_ssl_srv_conf_t, dhparam), > NULL }, > > { ngx_string("ssl_ecdh_curve"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, ecdh_curve), 
> + offsetof(ngx_stream_ssl_srv_conf_t, ecdh_curve), > NULL }, > > { ngx_string("ssl_protocols"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > ngx_conf_set_bitmask_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, protocols), > + offsetof(ngx_stream_ssl_srv_conf_t, protocols), > &ngx_stream_ssl_protocols }, > > { ngx_string("ssl_ciphers"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, ciphers), > + offsetof(ngx_stream_ssl_srv_conf_t, ciphers), > NULL }, > > { ngx_string("ssl_verify_client"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_enum_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, verify), > + offsetof(ngx_stream_ssl_srv_conf_t, verify), > &ngx_stream_ssl_verify }, > > { ngx_string("ssl_verify_depth"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_num_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, verify_depth), > + offsetof(ngx_stream_ssl_srv_conf_t, verify_depth), > NULL }, > > { ngx_string("ssl_client_certificate"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, client_certificate), > + offsetof(ngx_stream_ssl_srv_conf_t, client_certificate), > NULL }, > > { ngx_string("ssl_trusted_certificate"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, trusted_certificate), > + offsetof(ngx_stream_ssl_srv_conf_t, trusted_certificate), > NULL }, > > { ngx_string("ssl_prefer_server_ciphers"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, > ngx_conf_set_flag_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, prefer_server_ciphers), > + offsetof(ngx_stream_ssl_srv_conf_t, prefer_server_ciphers), > NULL }, > > { ngx_string("ssl_session_cache"), > @@ -188,42 +188,42 @@ static ngx_command_t ngx_stream_ssl_com > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, > ngx_conf_set_flag_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, session_tickets), > + offsetof(ngx_stream_ssl_srv_conf_t, session_tickets), > NULL }, > > { ngx_string("ssl_session_ticket_key"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_array_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, session_ticket_keys), > + offsetof(ngx_stream_ssl_srv_conf_t, session_ticket_keys), > NULL }, > > { ngx_string("ssl_session_timeout"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_sec_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, session_timeout), > + offsetof(ngx_stream_ssl_srv_conf_t, session_timeout), > NULL }, > > { ngx_string("ssl_crl"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, crl), > + offsetof(ngx_stream_ssl_srv_conf_t, crl), > NULL }, > > { ngx_string("ssl_conf_command"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE2, > ngx_conf_set_keyval_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, conf_commands), > + offsetof(ngx_stream_ssl_srv_conf_t, conf_commands), > &ngx_stream_ssl_conf_command_post }, > > { ngx_string("ssl_reject_handshake"), > 
NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, > ngx_conf_set_flag_slot, > NGX_STREAM_SRV_CONF_OFFSET, > - offsetof(ngx_stream_ssl_conf_t, reject_handshake), > + offsetof(ngx_stream_ssl_srv_conf_t, reject_handshake), > NULL }, > > { ngx_string("ssl_alpn"), > @@ -244,8 +244,8 @@ static ngx_stream_module_t ngx_stream_s > NULL, /* create main configuration */ > NULL, /* init main configuration */ > > - ngx_stream_ssl_create_conf, /* create server configuration */ > - ngx_stream_ssl_merge_conf /* merge server configuration */ > + ngx_stream_ssl_create_srv_conf, /* create server configuration */ > + ngx_stream_ssl_merge_srv_conf /* merge server configuration */ > }; > > > @@ -339,11 +339,11 @@ static ngx_str_t ngx_stream_ssl_sess_id_ > static ngx_int_t > ngx_stream_ssl_handler(ngx_stream_session_t *s) > { > - long rc; > - X509 *cert; > - ngx_int_t rv; > - ngx_connection_t *c; > - ngx_stream_ssl_conf_t *sslcf; > + long rc; > + X509 *cert; > + ngx_int_t rv; > + ngx_connection_t *c; > + ngx_stream_ssl_srv_conf_t *sscf; > > if (!s->ssl) { > return NGX_OK; > @@ -351,23 +351,23 @@ ngx_stream_ssl_handler(ngx_stream_sessio > > c = s->connection; > > - sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > > if (c->ssl == NULL) { > c->log->action = "SSL handshaking"; > > - rv = ngx_stream_ssl_init_connection(&sslcf->ssl, c); > + rv = ngx_stream_ssl_init_connection(&sscf->ssl, c); > > if (rv != NGX_OK) { > return rv; > } > } > > - if (sslcf->verify) { > + if (sscf->verify) { > rc = SSL_get_verify_result(c->ssl->connection); > > if (rc != X509_V_OK > - && (sslcf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) > + && (sscf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) > { > ngx_log_error(NGX_LOG_INFO, c->log, 0, > "client SSL certificate verify error: (%l:%s)", > @@ -378,7 +378,7 @@ ngx_stream_ssl_handler(ngx_stream_sessio > return NGX_ERROR; > } > > - if (sslcf->verify == 1) { > + if (sscf->verify == 1) { > cert = SSL_get_peer_certificate(c->ssl->connection); > > if (cert == NULL) { > @@ -403,7 +403,7 @@ ngx_stream_ssl_init_connection(ngx_ssl_t > { > ngx_int_t rc; > ngx_stream_session_t *s; > - ngx_stream_ssl_conf_t *sslcf; > + ngx_stream_ssl_srv_conf_t *sscf; > ngx_stream_core_srv_conf_t *cscf; > > s = c->data; > @@ -425,9 +425,9 @@ ngx_stream_ssl_init_connection(ngx_ssl_t > } > > if (rc == NGX_AGAIN) { > - sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > > - ngx_add_timer(c->read, sslcf->handshake_timeout); > + ngx_add_timer(c->read, sscf->handshake_timeout); > > c->ssl->handler = ngx_stream_ssl_handshake_handler; > > @@ -470,7 +470,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t > const char *servername; > ngx_connection_t *c; > ngx_stream_session_t *s; > - ngx_stream_ssl_conf_t *sscf; > + ngx_stream_ssl_srv_conf_t *sscf; > ngx_stream_core_srv_conf_t *cscf; > > c = ngx_ssl_get_connection(ssl_conn); > @@ -625,7 +625,7 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ > ngx_uint_t i, nelts; > ngx_connection_t *c; > ngx_stream_session_t *s; > - ngx_stream_ssl_conf_t *sslcf; > + ngx_stream_ssl_srv_conf_t *sscf; > ngx_stream_complex_value_t *certs, *keys; > > c = ngx_ssl_get_connection(ssl_conn); > @@ -636,11 +636,11 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ > > s = c->data; > > - sslcf = arg; > + sscf = arg; > > - nelts = sslcf->certificate_values->nelts; > - certs = sslcf->certificate_values->elts; > - keys = 
sslcf->certificate_key_values->elts; > + nelts = sscf->certificate_values->nelts; > + certs = sscf->certificate_values->elts; > + keys = sscf->certificate_key_values->elts; > > for (i = 0; i < nelts; i++) { > > @@ -659,7 +659,7 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ > "ssl key: \"%s\"", key.data); > > if (ngx_ssl_connection_certificate(c, c->pool, &cert, &key, > - sslcf->passwords) > + sscf->passwords) > != NGX_OK) > { > return 0; > @@ -755,53 +755,53 @@ ngx_stream_ssl_add_variables(ngx_conf_t > > > static void * > -ngx_stream_ssl_create_conf(ngx_conf_t *cf) > +ngx_stream_ssl_create_srv_conf(ngx_conf_t *cf) > { > - ngx_stream_ssl_conf_t *scf; > + ngx_stream_ssl_srv_conf_t *sscf; > > - scf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_ssl_conf_t)); > - if (scf == NULL) { > + sscf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_ssl_srv_conf_t)); > + if (sscf == NULL) { > return NULL; > } > > /* > * set by ngx_pcalloc(): > * > - * scf->protocols = 0; > - * scf->certificate_values = NULL; > - * scf->dhparam = { 0, NULL }; > - * scf->ecdh_curve = { 0, NULL }; > - * scf->client_certificate = { 0, NULL }; > - * scf->trusted_certificate = { 0, NULL }; > - * scf->crl = { 0, NULL }; > - * scf->alpn = { 0, NULL }; > - * scf->ciphers = { 0, NULL }; > - * scf->shm_zone = NULL; > + * sscf->protocols = 0; > + * sscf->certificate_values = NULL; > + * sscf->dhparam = { 0, NULL }; > + * sscf->ecdh_curve = { 0, NULL }; > + * sscf->client_certificate = { 0, NULL }; > + * sscf->trusted_certificate = { 0, NULL }; > + * sscf->crl = { 0, NULL }; > + * sscf->alpn = { 0, NULL }; > + * sscf->ciphers = { 0, NULL }; > + * sscf->shm_zone = NULL; > */ > > - scf->handshake_timeout = NGX_CONF_UNSET_MSEC; > - scf->certificates = NGX_CONF_UNSET_PTR; > - scf->certificate_keys = NGX_CONF_UNSET_PTR; > - scf->passwords = NGX_CONF_UNSET_PTR; > - scf->conf_commands = NGX_CONF_UNSET_PTR; > - scf->prefer_server_ciphers = NGX_CONF_UNSET; > - scf->reject_handshake = NGX_CONF_UNSET; > - scf->verify = NGX_CONF_UNSET_UINT; > - scf->verify_depth = NGX_CONF_UNSET_UINT; > - scf->builtin_session_cache = NGX_CONF_UNSET; > - scf->session_timeout = NGX_CONF_UNSET; > - scf->session_tickets = NGX_CONF_UNSET; > - scf->session_ticket_keys = NGX_CONF_UNSET_PTR; > + sscf->handshake_timeout = NGX_CONF_UNSET_MSEC; > + sscf->certificates = NGX_CONF_UNSET_PTR; > + sscf->certificate_keys = NGX_CONF_UNSET_PTR; > + sscf->passwords = NGX_CONF_UNSET_PTR; > + sscf->conf_commands = NGX_CONF_UNSET_PTR; > + sscf->prefer_server_ciphers = NGX_CONF_UNSET; > + sscf->reject_handshake = NGX_CONF_UNSET; > + sscf->verify = NGX_CONF_UNSET_UINT; > + sscf->verify_depth = NGX_CONF_UNSET_UINT; > + sscf->builtin_session_cache = NGX_CONF_UNSET; > + sscf->session_timeout = NGX_CONF_UNSET; > + sscf->session_tickets = NGX_CONF_UNSET; > + sscf->session_ticket_keys = NGX_CONF_UNSET_PTR; > > - return scf; > + return sscf; > } > > > static char * > -ngx_stream_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) > +ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) > { > - ngx_stream_ssl_conf_t *prev = parent; > - ngx_stream_ssl_conf_t *conf = child; > + ngx_stream_ssl_srv_conf_t *prev = parent; > + ngx_stream_ssl_srv_conf_t *conf = child; > > ngx_pool_cleanup_t *cln; > > @@ -1010,7 +1010,7 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf > > static ngx_int_t > ngx_stream_ssl_compile_certificates(ngx_conf_t *cf, > - ngx_stream_ssl_conf_t *conf) > + ngx_stream_ssl_srv_conf_t *conf) > { > ngx_str_t *cert, *key; > ngx_uint_t i, nelts; > @@ -1099,19 +1099,19 @@ found: > 
static char * > ngx_stream_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > { > - ngx_stream_ssl_conf_t *scf = conf; > + ngx_stream_ssl_srv_conf_t *sscf = conf; > > ngx_str_t *value; > > - if (scf->passwords != NGX_CONF_UNSET_PTR) { > + if (sscf->passwords != NGX_CONF_UNSET_PTR) { > return "is duplicate"; > } > > value = cf->args->elts; > > - scf->passwords = ngx_ssl_read_password_file(cf, &value[1]); > + sscf->passwords = ngx_ssl_read_password_file(cf, &value[1]); > > - if (scf->passwords == NULL) { > + if (sscf->passwords == NULL) { > return NGX_CONF_ERROR; > } > > @@ -1122,7 +1122,7 @@ ngx_stream_ssl_password_file(ngx_conf_t > static char * > ngx_stream_ssl_session_cache(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > { > - ngx_stream_ssl_conf_t *scf = conf; > + ngx_stream_ssl_srv_conf_t *sscf = conf; > > size_t len; > ngx_str_t *value, name, size; > @@ -1134,17 +1134,17 @@ ngx_stream_ssl_session_cache(ngx_conf_t > for (i = 1; i < cf->args->nelts; i++) { > > if (ngx_strcmp(value[i].data, "off") == 0) { > - scf->builtin_session_cache = NGX_SSL_NO_SCACHE; > + sscf->builtin_session_cache = NGX_SSL_NO_SCACHE; > continue; > } > > if (ngx_strcmp(value[i].data, "none") == 0) { > - scf->builtin_session_cache = NGX_SSL_NONE_SCACHE; > + sscf->builtin_session_cache = NGX_SSL_NONE_SCACHE; > continue; > } > > if (ngx_strcmp(value[i].data, "builtin") == 0) { > - scf->builtin_session_cache = NGX_SSL_DFLT_BUILTIN_SCACHE; > + sscf->builtin_session_cache = NGX_SSL_DFLT_BUILTIN_SCACHE; > continue; > } > > @@ -1159,7 +1159,7 @@ ngx_stream_ssl_session_cache(ngx_conf_t > goto invalid; > } > > - scf->builtin_session_cache = n; > + sscf->builtin_session_cache = n; > > continue; > } > @@ -1202,13 +1202,13 @@ ngx_stream_ssl_session_cache(ngx_conf_t > return NGX_CONF_ERROR; > } > > - scf->shm_zone = ngx_shared_memory_add(cf, &name, n, > + sscf->shm_zone = ngx_shared_memory_add(cf, &name, n, > &ngx_stream_ssl_module); > - if (scf->shm_zone == NULL) { > + if (sscf->shm_zone == NULL) { > return NGX_CONF_ERROR; > } > > - scf->shm_zone->init = ngx_ssl_session_cache_init; > + sscf->shm_zone->init = ngx_ssl_session_cache_init; > > continue; > } > @@ -1216,8 +1216,8 @@ ngx_stream_ssl_session_cache(ngx_conf_t > goto invalid; > } > > - if (scf->shm_zone && scf->builtin_session_cache == NGX_CONF_UNSET) { > - scf->builtin_session_cache = NGX_SSL_NO_BUILTIN_SCACHE; > + if (sscf->shm_zone && sscf->builtin_session_cache == NGX_CONF_UNSET) { > + sscf->builtin_session_cache = NGX_SSL_NO_BUILTIN_SCACHE; > } > > return NGX_CONF_OK; > @@ -1236,14 +1236,14 @@ ngx_stream_ssl_alpn(ngx_conf_t *cf, ngx_ > { > #ifdef TLSEXT_TYPE_application_layer_protocol_negotiation > > - ngx_stream_ssl_conf_t *scf = conf; > + ngx_stream_ssl_srv_conf_t *sscf = conf; > > u_char *p; > size_t len; > ngx_str_t *value; > ngx_uint_t i; > > - if (scf->alpn.len) { > + if (sscf->alpn.len) { > return "is duplicate"; > } > > @@ -1260,19 +1260,19 @@ ngx_stream_ssl_alpn(ngx_conf_t *cf, ngx_ > len += value[i].len + 1; > } > > - scf->alpn.data = ngx_pnalloc(cf->pool, len); > - if (scf->alpn.data == NULL) { > + sscf->alpn.data = ngx_pnalloc(cf->pool, len); > + if (sscf->alpn.data == NULL) { > return NGX_CONF_ERROR; > } > > - p = scf->alpn.data; > + p = sscf->alpn.data; > > for (i = 1; i < cf->args->nelts; i++) { > *p++ = value[i].len; > p = ngx_cpymem(p, value[i].data, value[i].len); > } > > - scf->alpn.len = len; > + sscf->alpn.len = len; > > return NGX_CONF_OK; > > @@ -1301,9 +1301,9 @@ ngx_stream_ssl_init(ngx_conf_t *cf) > { > ngx_uint_t a, p, s; 
> ngx_stream_handler_pt *h; > - ngx_stream_ssl_conf_t *sscf; > ngx_stream_conf_addr_t *addr; > ngx_stream_conf_port_t *port; > + ngx_stream_ssl_srv_conf_t *sscf; > ngx_stream_core_srv_conf_t **cscfp, *cscf; > ngx_stream_core_main_conf_t *cmcf; > > diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h > --- a/src/stream/ngx_stream_ssl_module.h > +++ b/src/stream/ngx_stream_ssl_module.h > @@ -53,7 +53,7 @@ typedef struct { > > ngx_flag_t session_tickets; > ngx_array_t *session_ticket_keys; > -} ngx_stream_ssl_conf_t; > +} ngx_stream_ssl_srv_conf_t; > > > extern ngx_module_t ngx_stream_ssl_module; > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks fine From xeioex at nginx.com Tue Jan 9 00:57:11 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:11 +0000 Subject: [njs] Fixed Date constructor for overflows and with NaN values. Message-ID: details: https://hg.nginx.org/njs/rev/57071ecadeb5 branches: changeset: 2251:57071ecadeb5 user: Dmitry Volyntsev date: Mon Jan 08 16:40:27 2024 -0800 description: Fixed Date constructor for overflows and with NaN values. Found by UndefinedBehaviorSanitizer. diffstat: src/njs_date.c | 8 ++++++++ 1 files changed, 8 insertions(+), 0 deletions(-) diffs (23 lines): diff -r 4a15613f4e8b -r 57071ecadeb5 src/njs_date.c --- a/src/njs_date.c Tue Dec 19 12:37:05 2023 -0800 +++ b/src/njs_date.c Mon Jan 08 16:40:27 2024 -0800 @@ -243,11 +243,19 @@ njs_make_date(int64_t tm[], njs_bool_t l days = njs_make_day(tm[NJS_DATE_YR], tm[NJS_DATE_MON], tm[NJS_DATE_DAY]); + if (njs_slow_path(isnan(days))) { + return NAN; + } + time = ((tm[NJS_DATE_HR] * 60.0 + tm[NJS_DATE_MIN]) * 60.0 + tm[NJS_DATE_SEC]) * 1000.0 + tm[NJS_DATE_MSEC]; time += days * 86400000.0; + if (time < -8.64e15 || time > 8.64e15) { + return NAN; + } + if (local) { time += njs_tz_offset(time) * 60000; } From xeioex at nginx.com Tue Jan 9 00:57:13 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:13 +0000 Subject: [njs] Unifying hash function prototypes. Message-ID: details: https://hg.nginx.org/njs/rev/721475693b80 branches: changeset: 2252:721475693b80 user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: Unifying hash function prototypes. This fixes UndefinedBehaviorSanitizer warning "call to function through pointer to incorrect function type". Found by UndefinedBehaviorSanitizer. 
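For reference, a minimal standalone sketch (illustration only, not njs
code) of the warning class being fixed here: previously the md5/sha1/sha2
contexts were distinct struct types, so calling their init/update/final
functions through the generic njs_hash_init/njs_hash_update/njs_hash_final
pointers was a call through an incompatible function pointer type:

    typedef struct { int x; } ctx_t;

    static void
    ctx_init(ctx_t *ctx)
    {
        ctx->x = 1;
    }

    int
    main(void)
    {
        /* the pointer's parameter type (void *) differs from the
         * function's actual prototype (ctx_t *); calling through
         * it is undefined behavior, reported by UBSan's
         * -fsanitize=function check even though it "works" on
         * common ABIs */
        void  (*init)(void *) = (void (*)(void *)) ctx_init;
        ctx_t   ctx;

        init(&ctx);

        return ctx.x - 1;
    }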
diffstat: external/njs_crypto_module.c | 71 +++++++++++++++++-------------------------- src/njs_hash.h | 32 +++++++++++++++++++ src/njs_main.h | 4 +- src/njs_md5.c | 10 +++--- src/njs_md5.h | 23 -------------- src/njs_sha1.c | 10 +++--- src/njs_sha1.h | 24 -------------- src/njs_sha2.c | 10 +++--- src/njs_sha2.h | 24 -------------- 9 files changed, 77 insertions(+), 131 deletions(-) diffs (424 lines): diff -r 57071ecadeb5 -r 721475693b80 external/njs_crypto_module.c --- a/external/njs_crypto_module.c Mon Jan 08 16:40:27 2024 -0800 +++ b/external/njs_crypto_module.c Mon Jan 08 16:40:42 2024 -0800 @@ -6,16 +6,14 @@ #include -#include -#include -#include +#include #include #include -typedef void (*njs_hash_init)(void *ctx); -typedef void (*njs_hash_update)(void *ctx, const void *data, size_t size); -typedef void (*njs_hash_final)(u_char *result, void *ctx); +typedef void (*njs_hash_init)(njs_hash_t *ctx); +typedef void (*njs_hash_update)(njs_hash_t *ctx, const void *data, size_t size); +typedef void (*njs_hash_final)(u_char result[32], njs_hash_t *ctx); typedef njs_int_t (*njs_digest_encode)(njs_vm_t *vm, njs_value_t *value, const njs_str_t *src); @@ -31,24 +29,13 @@ typedef struct { } njs_hash_alg_t; typedef struct { - union { - njs_md5_t md5; - njs_sha1_t sha1; - njs_sha2_t sha2; - } u; - + njs_hash_t ctx; njs_hash_alg_t *alg; } njs_digest_t; typedef struct { u_char opad[64]; - - union { - njs_md5_t md5; - njs_sha1_t sha1; - njs_sha2_t sha2; - } u; - + njs_hash_t ctx; njs_hash_alg_t *alg; } njs_hmac_t; @@ -85,25 +72,25 @@ static njs_hash_alg_t njs_hash_algorithm { njs_str("md5"), 16, - (njs_hash_init) njs_md5_init, - (njs_hash_update) njs_md5_update, - (njs_hash_final) njs_md5_final + njs_md5_init, + njs_md5_update, + njs_md5_final }, { njs_str("sha1"), 20, - (njs_hash_init) njs_sha1_init, - (njs_hash_update) njs_sha1_update, - (njs_hash_final) njs_sha1_final + njs_sha1_init, + njs_sha1_update, + njs_sha1_final }, { njs_str("sha256"), 32, - (njs_hash_init) njs_sha2_init, - (njs_hash_update) njs_sha2_update, - (njs_hash_final) njs_sha2_final + njs_sha2_init, + njs_sha2_update, + njs_sha2_final }, { @@ -312,7 +299,7 @@ njs_crypto_create_hash(njs_vm_t *vm, njs dgst->alg = alg; - alg->init(&dgst->u); + alg->init(&dgst->ctx); return njs_vm_external_create(vm, retval, njs_crypto_hash_proto_id, dgst, 0); @@ -390,10 +377,10 @@ njs_hash_prototype_update(njs_vm_t *vm, } if (!hmac) { - dgst->alg->update(&dgst->u, data.start, data.length); + dgst->alg->update(&dgst->ctx, data.start, data.length); } else { - ctx->alg->update(&ctx->u, data.start, data.length); + ctx->alg->update(&ctx->ctx, data.start, data.length); } njs_value_assign(retval, this); @@ -450,17 +437,17 @@ njs_hash_prototype_digest(njs_vm_t *vm, if (!hmac) { alg = dgst->alg; - alg->final(digest, &dgst->u); + alg->final(digest, &dgst->ctx); dgst->alg = NULL; } else { alg = ctx->alg; - alg->final(hash1, &ctx->u); + alg->final(hash1, &ctx->ctx); - alg->init(&ctx->u); - alg->update(&ctx->u, ctx->opad, 64); - alg->update(&ctx->u, hash1, alg->size); - alg->final(digest, &ctx->u); + alg->init(&ctx->ctx); + alg->update(&ctx->ctx, ctx->opad, 64); + alg->update(&ctx->ctx, hash1, alg->size); + alg->final(digest, &ctx->ctx); ctx->alg = NULL; } @@ -562,9 +549,9 @@ njs_crypto_create_hmac(njs_vm_t *vm, njs ctx->alg = alg; if (key.length > sizeof(key_buf)) { - alg->init(&ctx->u); - alg->update(&ctx->u, key.start, key.length); - alg->final(digest, &ctx->u); + alg->init(&ctx->ctx); + alg->update(&ctx->ctx, key.start, key.length); + alg->final(digest, 
&ctx->ctx); memcpy(key_buf, digest, alg->size); njs_explicit_memzero(key_buf + alg->size, sizeof(key_buf) - alg->size); @@ -583,8 +570,8 @@ njs_crypto_create_hmac(njs_vm_t *vm, njs key_buf[i] ^= 0x36; } - alg->init(&ctx->u); - alg->update(&ctx->u, key_buf, 64); + alg->init(&ctx->ctx); + alg->update(&ctx->ctx, key_buf, 64); return njs_vm_external_create(vm, retval, njs_crypto_hmac_proto_id, ctx, 0); diff -r 57071ecadeb5 -r 721475693b80 src/njs_hash.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_hash.h Mon Jan 08 16:40:42 2024 -0800 @@ -0,0 +1,32 @@ + +/* + * Copyright (C) Dmitry Volyntsev + * Copyright (C) NGINX, Inc. + */ + + +#ifndef _NJS_HASH_H_INCLUDED_ +#define _NJS_HASH_H_INCLUDED_ + + +typedef struct { + uint64_t bytes; + uint32_t a, b, c, d, e, f, g, h; + u_char buffer[64]; +} njs_hash_t; + + +NJS_EXPORT void njs_md5_init(njs_hash_t *ctx); +NJS_EXPORT void njs_md5_update(njs_hash_t *ctx, const void *data, size_t size); +NJS_EXPORT void njs_md5_final(u_char result[32], njs_hash_t *ctx); + +NJS_EXPORT void njs_sha1_init(njs_hash_t *ctx); +NJS_EXPORT void njs_sha1_update(njs_hash_t *ctx, const void *data, size_t size); +NJS_EXPORT void njs_sha1_final(u_char result[32], njs_hash_t *ctx); + +NJS_EXPORT void njs_sha2_init(njs_hash_t *ctx); +NJS_EXPORT void njs_sha2_update(njs_hash_t *ctx, const void *data, size_t size); +NJS_EXPORT void njs_sha2_final(u_char result[32], njs_hash_t *ctx); + + +#endif /* _NJS_HASH_H_INCLUDED_ */ diff -r 57071ecadeb5 -r 721475693b80 src/njs_main.h --- a/src/njs_main.h Mon Jan 08 16:40:27 2024 -0800 +++ b/src/njs_main.h Mon Jan 08 16:40:42 2024 -0800 @@ -41,9 +41,7 @@ #include -#include -#include -#include +#include #include #include diff -r 57071ecadeb5 -r 721475693b80 src/njs_md5.c --- a/src/njs_md5.c Mon Jan 08 16:40:27 2024 -0800 +++ b/src/njs_md5.c Mon Jan 08 16:40:42 2024 -0800 @@ -9,12 +9,12 @@ #include -static const u_char *njs_md5_body(njs_md5_t *ctx, const u_char *data, +static const u_char *njs_md5_body(njs_hash_t *ctx, const u_char *data, size_t size); void -njs_md5_init(njs_md5_t *ctx) +njs_md5_init(njs_hash_t *ctx) { ctx->a = 0x67452301; ctx->b = 0xefcdab89; @@ -26,7 +26,7 @@ njs_md5_init(njs_md5_t *ctx) void -njs_md5_update(njs_md5_t *ctx, const void *data, size_t size) +njs_md5_update(njs_hash_t *ctx, const void *data, size_t size) { size_t used, free; @@ -57,7 +57,7 @@ njs_md5_update(njs_md5_t *ctx, const voi void -njs_md5_final(u_char result[16], njs_md5_t *ctx) +njs_md5_final(u_char result[32], njs_hash_t *ctx) { size_t used, free; @@ -152,7 +152,7 @@ njs_md5_final(u_char result[16], njs_md5 */ static const u_char * -njs_md5_body(njs_md5_t *ctx, const u_char *data, size_t size) +njs_md5_body(njs_hash_t *ctx, const u_char *data, size_t size) { uint32_t a, b, c, d; uint32_t saved_a, saved_b, saved_c, saved_d; diff -r 57071ecadeb5 -r 721475693b80 src/njs_md5.h --- a/src/njs_md5.h Mon Jan 08 16:40:27 2024 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,23 +0,0 @@ - -/* - * Copyright (C) Igor Sysoev - * Copyright (C) Nginx, Inc. 
- */ - - -#ifndef _NJS_MD5_H_INCLUDED_ -#define _NJS_MD5_H_INCLUDED_ - - -typedef struct { - uint64_t bytes; - uint32_t a, b, c, d; - u_char buffer[64]; -} njs_md5_t; - - -NJS_EXPORT void njs_md5_init(njs_md5_t *ctx); -NJS_EXPORT void njs_md5_update(njs_md5_t *ctx, const void *data, size_t size); -NJS_EXPORT void njs_md5_final(u_char result[16], njs_md5_t *ctx); - -#endif /* _NJS_MD5_H_INCLUDED_ */ diff -r 57071ecadeb5 -r 721475693b80 src/njs_sha1.c --- a/src/njs_sha1.c Mon Jan 08 16:40:27 2024 -0800 +++ b/src/njs_sha1.c Mon Jan 08 16:40:42 2024 -0800 @@ -10,12 +10,12 @@ #include -static const u_char *njs_sha1_body(njs_sha1_t *ctx, const u_char *data, +static const u_char *njs_sha1_body(njs_hash_t *ctx, const u_char *data, size_t size); void -njs_sha1_init(njs_sha1_t *ctx) +njs_sha1_init(njs_hash_t *ctx) { ctx->a = 0x67452301; ctx->b = 0xefcdab89; @@ -28,7 +28,7 @@ njs_sha1_init(njs_sha1_t *ctx) void -njs_sha1_update(njs_sha1_t *ctx, const void *data, size_t size) +njs_sha1_update(njs_hash_t *ctx, const void *data, size_t size) { size_t used, free; @@ -59,7 +59,7 @@ njs_sha1_update(njs_sha1_t *ctx, const v void -njs_sha1_final(u_char result[20], njs_sha1_t *ctx) +njs_sha1_final(u_char result[32], njs_hash_t *ctx) { size_t used, free; @@ -152,7 +152,7 @@ njs_sha1_final(u_char result[20], njs_sh */ static const u_char * -njs_sha1_body(njs_sha1_t *ctx, const u_char *data, size_t size) +njs_sha1_body(njs_hash_t *ctx, const u_char *data, size_t size) { uint32_t a, b, c, d, e, temp; uint32_t saved_a, saved_b, saved_c, saved_d, saved_e; diff -r 57071ecadeb5 -r 721475693b80 src/njs_sha1.h --- a/src/njs_sha1.h Mon Jan 08 16:40:27 2024 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,24 +0,0 @@ - -/* - * Copyright (C) Igor Sysoev - * Copyright (C) NGINX, Inc. 
- */ - - -#ifndef _NJS_SHA1_H_INCLUDED_ -#define _NJS_SHA1_H_INCLUDED_ - - -typedef struct { - uint64_t bytes; - uint32_t a, b, c, d, e; - u_char buffer[64]; -} njs_sha1_t; - - -NJS_EXPORT void njs_sha1_init(njs_sha1_t *ctx); -NJS_EXPORT void njs_sha1_update(njs_sha1_t *ctx, const void *data, size_t size); -NJS_EXPORT void njs_sha1_final(u_char result[20], njs_sha1_t *ctx); - - -#endif /* _NJS_SHA1_H_INCLUDED_ */ diff -r 57071ecadeb5 -r 721475693b80 src/njs_sha2.c --- a/src/njs_sha2.c Mon Jan 08 16:40:27 2024 -0800 +++ b/src/njs_sha2.c Mon Jan 08 16:40:42 2024 -0800 @@ -10,12 +10,12 @@ #include -static const u_char *njs_sha2_body(njs_sha2_t *ctx, const u_char *data, +static const u_char *njs_sha2_body(njs_hash_t *ctx, const u_char *data, size_t size); void -njs_sha2_init(njs_sha2_t *ctx) +njs_sha2_init(njs_hash_t *ctx) { ctx->a = 0x6a09e667; ctx->b = 0xbb67ae85; @@ -31,7 +31,7 @@ njs_sha2_init(njs_sha2_t *ctx) void -njs_sha2_update(njs_sha2_t *ctx, const void *data, size_t size) +njs_sha2_update(njs_hash_t *ctx, const void *data, size_t size) { size_t used, free; @@ -62,7 +62,7 @@ njs_sha2_update(njs_sha2_t *ctx, const v void -njs_sha2_final(u_char result[32], njs_sha2_t *ctx) +njs_sha2_final(u_char result[32], njs_hash_t *ctx) { size_t used, free; @@ -172,7 +172,7 @@ njs_sha2_final(u_char result[32], njs_sh */ static const u_char * -njs_sha2_body(njs_sha2_t *ctx, const u_char *data, size_t size) +njs_sha2_body(njs_hash_t *ctx, const u_char *data, size_t size) { uint32_t a, b, c, d, e, f, g, h, s0, s1, temp1, temp2; uint32_t saved_a, saved_b, saved_c, saved_d, saved_e, saved_f, diff -r 57071ecadeb5 -r 721475693b80 src/njs_sha2.h --- a/src/njs_sha2.h Mon Jan 08 16:40:27 2024 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,24 +0,0 @@ - -/* - * Copyright (C) Dmitry Volyntsev - * Copyright (C) NGINX, Inc. - */ - - -#ifndef _NJS_SHA2_H_INCLUDED_ -#define _NJS_SHA2_H_INCLUDED_ - - -typedef struct { - uint64_t bytes; - uint32_t a, b, c, d, e, f, g, h; - u_char buffer[64]; -} njs_sha2_t; - - -NJS_EXPORT void njs_sha2_init(njs_sha2_t *ctx); -NJS_EXPORT void njs_sha2_update(njs_sha2_t *ctx, const void *data, size_t size); -NJS_EXPORT void njs_sha2_final(u_char result[32], njs_sha2_t *ctx); - - -#endif /* _NJS_SHA2_H_INCLUDED_ */ From xeioex at nginx.com Tue Jan 9 00:57:15 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:15 +0000 Subject: [njs] Fixed external values initialization in unit tests. Message-ID: details: https://hg.nginx.org/njs/rev/9fadb2e9c6ea branches: changeset: 2253:9fadb2e9c6ea user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: Fixed external values initialization in unit tests. Since 0.8.0 modules can create their own constructors and prototypes. A modules has two method: preinit() and init(). A module should add its constructors and prototypes in preinit() and create its own values in init(). Creating a value in preinit() results in an error. The patch fixes the issue by creating an external value in init() instead of preinit(). Found by UndefinedBehaviorSanitizer. 
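For reference, the preinit()/init() contract described above, as a
schematic sketch (structure only; my_external stands for an assumed
njs_external_t array, and the interfaces used are those visible in the
patch below):

    static njs_int_t  my_proto_id;

    static njs_int_t
    my_module_preinit(njs_vm_t *vm)
    {
        /* register prototypes and constructors only; creating
         * values here results in an error */
        my_proto_id = njs_vm_external_prototype(vm, my_external,
                                                njs_nitems(my_external));
        return (my_proto_id < 0) ? NJS_ERROR : NJS_OK;
    }

    static njs_int_t
    my_module_init(njs_vm_t *vm)
    {
        njs_opaque_value_t  value;

        /* values may only be created here, once the VM is set up */
        return njs_vm_external_create(vm, njs_value_arg(&value),
                                      my_proto_id, NULL, 1);
    }

    njs_module_t  my_module = {
        .name = njs_str("my"),
        .preinit = my_module_preinit,
        .init = my_module_init,
    };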
diffstat: src/test/njs_externals_test.c | 136 ++++++++++++++++++++++------------------- 1 files changed, 72 insertions(+), 64 deletions(-) diffs (176 lines): diff -r 721475693b80 -r 9fadb2e9c6ea src/test/njs_externals_test.c --- a/src/test/njs_externals_test.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/test/njs_externals_test.c Mon Jan 08 16:40:42 2024 -0800 @@ -28,6 +28,7 @@ typedef struct { static njs_int_t njs_externals_262_init(njs_vm_t *vm); static njs_int_t njs_externals_shared_preinit(njs_vm_t *vm); +static njs_int_t njs_externals_shared_init(njs_vm_t *vm); njs_int_t njs_array_buffer_detach(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); @@ -47,7 +48,7 @@ njs_module_t njs_unit_test_262_module = njs_module_t njs_unit_test_external_module = { .name = njs_str("external"), .preinit = njs_externals_shared_preinit, - .init = NULL, + .init = njs_externals_shared_init, }; @@ -1253,77 +1254,15 @@ njs_externals_init_internal(njs_vm_t *vm { njs_int_t ret; njs_uint_t i, j; - njs_function_t *f; - njs_opaque_value_t value; njs_unit_test_req_t *requests; njs_unit_test_prop_t *prop; - static const njs_str_t external_ctor = njs_str("ExternalConstructor"); - static const njs_str_t external_null = njs_str("ExternalNull"); - static const njs_str_t external_error = njs_str("ExternalError"); - - if (shared) { - njs_external_r_proto_id = njs_vm_external_prototype(vm, - njs_unit_test_r_external, - njs_nitems(njs_unit_test_r_external)); - if (njs_slow_path(njs_external_r_proto_id < 0)) { - njs_printf("njs_vm_external_prototype() failed\n"); - return NJS_ERROR; - } - - f = njs_vm_function_alloc(vm, njs_unit_test_constructor, 1, 1); - if (f == NULL) { - njs_printf("njs_vm_function_alloc() failed\n"); - return NJS_ERROR; - } - - njs_value_function_set(njs_value_arg(&value), f); - - ret = njs_vm_bind(vm, &external_ctor, njs_value_arg(&value), 1); - if (njs_slow_path(ret != NJS_OK)) { - njs_printf("njs_vm_bind() failed\n"); - return NJS_ERROR; - } - - njs_external_null_proto_id = njs_vm_external_prototype(vm, - njs_unit_test_null_external, - njs_nitems(njs_unit_test_null_external)); - if (njs_slow_path(njs_external_null_proto_id < 0)) { - njs_printf("njs_vm_external_prototype() failed\n"); - return NJS_ERROR; - } - - ret = njs_vm_external_create(vm, njs_value_arg(&value), - njs_external_null_proto_id, NULL, 1); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - - ret = njs_vm_bind(vm, &external_null, njs_value_arg(&value), 1); - if (njs_slow_path(ret != NJS_OK)) { - njs_printf("njs_vm_bind() failed\n"); - return NJS_ERROR; - } - - njs_external_error_ctor_id = - njs_vm_external_constructor(vm, &external_error, - njs_error_constructor, njs_unit_test_ctor_props, - njs_nitems(njs_unit_test_ctor_props), - njs_unit_test_proto_props, - njs_nitems(njs_unit_test_proto_props)); - if (njs_slow_path(njs_external_error_ctor_id < 0)) { - njs_printf("njs_vm_external_constructor() failed\n"); - return NJS_ERROR; - } - } - requests = njs_mp_zalloc(vm->mem_pool, n * sizeof(njs_unit_test_req_t)); if (njs_slow_path(requests == NULL)) { return NJS_ERROR; } for (i = 0; i < n; i++) { - requests[i] = init[i].request; ret = njs_vm_external_create(vm, njs_value_arg(&requests[i].value), @@ -1396,7 +1335,76 @@ njs_externals_262_init(njs_vm_t *vm) static njs_int_t njs_externals_shared_preinit(njs_vm_t *vm) { - return njs_externals_init_internal(vm, njs_test_requests, 1, 1); + static const njs_str_t external_error = njs_str("ExternalError"); + + njs_external_r_proto_id = 
njs_vm_external_prototype(vm, + njs_unit_test_r_external, + njs_nitems(njs_unit_test_r_external)); + if (njs_slow_path(njs_external_r_proto_id < 0)) { + njs_printf("njs_vm_external_prototype() failed\n"); + return NJS_ERROR; + } + + njs_external_null_proto_id = njs_vm_external_prototype(vm, + njs_unit_test_null_external, + njs_nitems(njs_unit_test_null_external)); + if (njs_slow_path(njs_external_null_proto_id < 0)) { + njs_printf("njs_vm_external_prototype() failed\n"); + return NJS_ERROR; + } + + njs_external_error_ctor_id = + njs_vm_external_constructor(vm, &external_error, + njs_error_constructor, njs_unit_test_ctor_props, + njs_nitems(njs_unit_test_ctor_props), + njs_unit_test_proto_props, + njs_nitems(njs_unit_test_proto_props)); + if (njs_slow_path(njs_external_error_ctor_id < 0)) { + njs_printf("njs_vm_external_constructor() failed\n"); + return NJS_ERROR; + } + + return NJS_OK; +} + + +static njs_int_t +njs_externals_shared_init(njs_vm_t *vm) +{ + njs_int_t ret; + njs_function_t *f; + njs_opaque_value_t value; + + static const njs_str_t external_ctor = njs_str("ExternalConstructor"); + static const njs_str_t external_null = njs_str("ExternalNull"); + + f = njs_vm_function_alloc(vm, njs_unit_test_constructor, 1, 1); + if (f == NULL) { + njs_printf("njs_vm_function_alloc() failed\n"); + return NJS_ERROR; + } + + njs_value_function_set(njs_value_arg(&value), f); + + ret = njs_vm_bind(vm, &external_ctor, njs_value_arg(&value), 1); + if (njs_slow_path(ret != NJS_OK)) { + njs_printf("njs_vm_bind() failed\n"); + return NJS_ERROR; + } + + ret = njs_vm_external_create(vm, njs_value_arg(&value), + njs_external_null_proto_id, NULL, 1); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + ret = njs_vm_bind(vm, &external_null, njs_value_arg(&value), 1); + if (njs_slow_path(ret != NJS_OK)) { + njs_printf("njs_vm_bind() failed\n"); + return NJS_ERROR; + } + + return njs_externals_init_internal(vm, &njs_test_requests[0], 1, 1); } From xeioex at nginx.com Tue Jan 9 00:57:17 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:17 +0000 Subject: [njs] QueryString: fixed underflow in parse(). Message-ID: details: https://hg.nginx.org/njs/rev/c43745da92cd branches: changeset: 2254:c43745da92cd user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: QueryString: fixed underflow in parse(). Previously, njs_query_string_append() might be provided with invalid val_size value when value in a key-value pair was absent. Found by UndefinedBehaviorSanitizer. diffstat: external/njs_query_string_module.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 9fadb2e9c6ea -r c43745da92cd external/njs_query_string_module.c --- a/external/njs_query_string_module.c Mon Jan 08 16:40:42 2024 -0800 +++ b/external/njs_query_string_module.c Mon Jan 08 16:40:42 2024 -0800 @@ -506,7 +506,7 @@ njs_query_string_parser(njs_vm_t *vm, u_ size = val - key; - if (val != end) { + if (val != part) { val += eq->length; } From xeioex at nginx.com Tue Jan 9 00:57:19 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:19 +0000 Subject: [njs] Fixed initialization of external prototypes with object entry. Message-ID: details: https://hg.nginx.org/njs/rev/ee4d396aa418 branches: changeset: 2255:ee4d396aa418 user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: Fixed initialization of external prototypes with object entry. 
When external was NULL (for example, when .u.object.properties is not declared), an arithmetic operation was performed with NULL pointer which is undefined behavior. Found by UndefinedBehaviorSanitizer. diffstat: src/njs_extern.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r c43745da92cd -r ee4d396aa418 src/njs_extern.c --- a/src/njs_extern.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_extern.c Mon Jan 08 16:40:42 2024 -0800 @@ -34,6 +34,10 @@ njs_external_add(njs_vm_t *vm, njs_arr_t hash = &slot->external_shared_hash; njs_lvlhsh_init(hash); + if (n == 0) { + return NJS_OK; + } + lhq.replace = 0; lhq.proto = &njs_object_hash_proto; lhq.pool = vm->mem_pool; From xeioex at nginx.com Tue Jan 9 00:57:21 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:21 +0000 Subject: [njs] Improved array enumeration with length 0. Message-ID: details: https://hg.nginx.org/njs/rev/41d0de3ad198 branches: changeset: 2256:41d0de3ad198 user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: Improved array enumeration with length 0. The fix eliminates an arithmetic operation with NULL pointer. Found by UndefinedBehaviorSanitizer. diffstat: src/njs_object.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r ee4d396aa418 -r 41d0de3ad198 src/njs_object.c --- a/src/njs_object.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_object.c Mon Jan 08 16:40:42 2024 -0800 @@ -591,7 +591,7 @@ njs_object_enumerate_array(njs_vm_t *vm, njs_value_t *p, *start, *end; njs_array_t *entry; - if (!array->object.fast_array) { + if (!array->object.fast_array || array->length == 0) { return NJS_OK; } From xeioex at nginx.com Tue Jan 9 00:57:23 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 00:57:23 +0000 Subject: [njs] Fixed RegExp.prototype.exec() when second argument is absent. Message-ID: details: https://hg.nginx.org/njs/rev/275d785ab5bf branches: changeset: 2257:275d785ab5bf user: Dmitry Volyntsev date: Mon Jan 08 16:40:42 2024 -0800 description: Fixed RegExp.prototype.exec() when second argument is absent. Previously, when the second argument is undefined, NaN is casted to unsigned which is undefined behavior. Found by UndefinedBehaviorSanitizer. diffstat: src/njs_regexp.c | 11 +++++++++-- 1 files changed, 9 insertions(+), 2 deletions(-) diffs (28 lines): diff -r 41d0de3ad198 -r 275d785ab5bf src/njs_regexp.c --- a/src/njs_regexp.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_regexp.c Mon Jan 08 16:40:42 2024 -0800 @@ -1235,6 +1235,7 @@ njs_int_t njs_regexp_prototype_exec(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) { + unsigned flags; njs_int_t ret; njs_value_t *r, *s; njs_value_t string_lvalue; @@ -1253,8 +1254,14 @@ njs_regexp_prototype_exec(njs_vm_t *vm, return ret; } - return njs_regexp_builtin_exec(vm, r, s, - njs_number(njs_arg(args, nargs, 2)), retval); + if (nargs > 2) { + flags = njs_number(njs_arg(args, nargs, 2)); + + } else { + flags = 0; + } + + return njs_regexp_builtin_exec(vm, r, s, flags, retval); } From mdounin at mdounin.ru Tue Jan 9 04:37:49 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 9 Jan 2024 07:37:49 +0300 Subject: [PATCH] SSL: raised limit for upstream session size In-Reply-To: References: Message-ID: Hello! On Tue, Dec 26, 2023 at 12:29:54AM +0400, Sergey Kandaurov wrote: > > On 23 Dec 2023, at 01:46, Maxim Dounin wrote: > > > > Hello! 
> > 
> > On Fri, Dec 22, 2023 at 06:28:34PM +0400, Sergey Kandaurov wrote:
> > 
> >> # HG changeset patch
> >> # User Sergey Kandaurov
> >> # Date 1703255284 -14400
> >> #      Fri Dec 22 18:28:04 2023 +0400
> >> # Node ID a463fb67e143c051fd373d1df94e5813a37d5cea
> >> # Parent  44266e0651c44f530c4aa66e68c1b9464a9acee7
> >> SSL: raised limit for upstream session size.
> >> 
> >> Unlike shared session cache used to store multiple client SSL sessions and
> >> which may be per a single SSL connection, sessions saved from upstream are
> >> per upstream server peer, so there is no such multiplier effect, but they
> >> may be of noticeably larger size due to session tickets being used.
> >> 
> >> It was observed that session tickets sent from JVM backends may result in
> >> a decoded session size nearly the previous maximum session size limit of
> >> 4096 or slightly beyond. Raising the limit allows to save such sessions.
> > 
> > Session tickets are not expected to be larger than sessions 
> > itself, except by several bytes used for key identification and 
> > encryption overhead. I see no reasons why the limit should be 
> > different in different places.
> > 
> > And 4096 for an SSL session looks a lot. The only justification I 
> > can assume here is an SSL session with the client certificate (or 
> > even certificate chain) being saved into the session. It might 
> > worth looking into what actually happens here.
> > 
> 
> Indeed. Both local and peer certificate chains are serialized and
> encrypted as part of constructing a session ticket. Per the original
> change to support tickets, this is hardcoded and may not be adjusted:
> https://hg.openjdk.org/jdk/jdk/rev/c2398053ee90#l4.352
> https://hg.openjdk.org/jdk/jdk/rev/c2398053ee90#l10.261

From my limited understanding of the JDK code, at least peerCerts seems 
to contain only certificates actually sent by the client, which is 
understandable (links to GitHub, since hg.openjdk.org used to be 
unresponsive when writing this, and returned 504 for almost all 
requests):

https://github.com/openjdk/jdk/blob/4fc6b0ffa4f771991a5ebd982b5133d2e364fdae/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java#L416

But localCerts seems to be always set on the server side, with all the 
certificates being sent to the client:

https://github.com/openjdk/jdk/blob/4fc6b0ffa4f771991a5ebd982b5133d2e364fdae/src/java.base/share/classes/sun/security/ssl/CertificateMessage.java#L265

This looks like an issue on the JDK side: there is no reason why server 
certificates need to be saved into the session on the server, as they 
are readily available there. Further, the relevant saving code seems to 
be commented as "Client identity", which suggests that these might be 
saved unintentionally, with an assumption that the code is only used on 
the client (OTOH, I don't see why the client needs its own certificates 
in the session as well).

Do you have an affected JVM backend on hand to confirm this is indeed 
the case? I tend to think this needs to be reported to JDK developers. 
Their current code results in sending the server certificate chain to 
the client at least twice (once in the handshake itself, and at least 
once in the ticket; not to mention that there can be more than one 
ticket in TLSv1.3), and they might reconsider doing this. (Funny 
enough, they seem to be using a cache to deserialize certificates from 
such tickets, see https://bugs.openjdk.org/browse/JDK-8286433 for 
details.)
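For reference, the size check on nginx's side boils down to the usual 
two-call i2d_SSL_SESSION() pattern; the following is a simplified 
sketch of that pattern (illustrative only, not the actual nginx code; 
the function name and buffer handling are invented for the example):

    #include <openssl/ssl.h>

    #define MAX_UPSTREAM_SESSION_SIZE  4096   /* the limit in question */

    /* Serialize a session into a fixed-size buffer, refusing
     * sessions which do not fit. */
    static int
    save_session_sketch(SSL_SESSION *sess, unsigned char *buf, size_t size)
    {
        int             len;
        unsigned char  *p;

        len = i2d_SSL_SESSION(sess, NULL);    /* 1st call: size only */

        if (len <= 0 || (size_t) len > size) {
            return -1;                        /* session is not saved */
        }

        p = buf;
        (void) i2d_SSL_SESSION(sess, &p);     /* 2nd call: serialize */

        return len;
    }

A ticket carrying a full certificate chain easily fails such a check, 
which is how the JVM behaviour described above manifests on our side.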
Meanwhile, we can consider implementing a workaround on our side (that is, raising the limit, though I don't think there should be separate limits; also, I'm somewhat concerned about using 8k buffers on stack, we currently don't use anything larger than 4k) or instead focus on providing some guidance to users of affected JVM backends (I guess switching off tickets and/or TLSv1.3 should be enough in most cases). -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Jan 9 05:59:14 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 9 Jan 2024 08:59:14 +0300 Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162) In-Reply-To: References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru> Message-ID: Hello! On Mon, Jan 08, 2024 at 01:31:11PM +0000, J Carter wrote: > On Mon, 8 Jan 2024 11:25:55 +0000 > J Carter wrote: > > > Hello, > > > > On Mon, 27 Nov 2023 05:50:27 +0300 > > Maxim Dounin wrote: > > > > > # HG changeset patch > > > # User Maxim Dounin > > > # Date 1701050170 -10800 > > > # Mon Nov 27 04:56:10 2023 +0300 > > > # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2 > > > # Parent 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51 > > > AIO operations now add timers (ticket #2162). > > > > > > Each AIO (thread IO) operation being run is now accompanied with 1-minute > > > timer. This timer prevents unexpected shutdown of the worker process while > > > an AIO operation is running, and logs an alert if the operation is running > > > for too long. > > > > Shouldn't this timer's duration be set to match worker_shutdown_timeout's > > duration rather than being hard coded to 60s ? > > Ah nevermind, I understand. > > These timers will either expire from passing the 60s set duration, or > will expire as worker_process_timeout itself expires, kills the > connection and times out associated timers (including the aio timers). > > Setting it to worker_shutdown_timeout's duration would be pointless > (an 'infinite' timer would give the same result). > > So the only situation in which a different value for these AIO > timers would make sense is if these AIO operations are expected to > take longer 60s, but less than worker_shutdown_timeout (in cases > where it has been increased from it's own default of 60s). > > In that case the AIO operation's timeout would have to be one > (or more) of it's own directives, with a value less than > worker_shutdown_timeout. Not really. When worker_shutdown_timeout expires, it tries to terminate the request, but it can't as long as an AIO operation is running. When the AIO operation completes, the request will be actually terminated and the worker process will be allowed to exit. So far so good. But if the AIO operation never completes, the timer will expire after 1 minute, will log an alert, and the worker processes will be anyway allowed to exit (with the request still present). This might not be actually possible though - for example, ngx_thread_pool_exit_worker() will just block waiting for all pending operations to complete. In theory, the situation when an AIO operation never completes should never happen, and just a counter of pending AIO operations can be used instead to delay shutdown (which is essentially equivalent to an infinite timer). In practice, though, using a large enough guard timer might be better: it provides additional level of protection against bugs or system misbehaviour, and at least logs an alert if something really weird happens. 
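Schematically, the mechanism amounts to the following (a simplified 
sketch for illustration, not the patch itself; the handler names are 
hypothetical):

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_event.h>

    #define NGX_AIO_GUARD_TIMEOUT  60000   /* 1 minute, in milliseconds */

    /* Posting an AIO (thread IO) operation arms a guard timer on the
     * completion event; any pending timer delays worker shutdown, and
     * its expiration logs an alert. */
    static void
    ngx_aio_guard_start(ngx_event_t *ev)
    {
        ngx_add_timer(ev, NGX_AIO_GUARD_TIMEOUT);
    }

    /* Completion disarms the timer, letting shutdown proceed. */
    static void
    ngx_aio_guard_done(ngx_event_t *ev)
    {
        if (ev->timer_set) {
            ngx_del_timer(ev);
        }
    }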
It also looks more universal and in line with the current approach of 
using existing timers as an indicator that something is going on and 
shutdown should be delayed.

The timer is expected to be "large enough", since we cannot do anything 
meaningful with an AIO operation which never completes, we can only 
complain loudly, so the timer should never expire unless there is 
something really wrong. This is not the case with 
worker_shutdown_timeout: it can be set to an arbitrarily low value, 
which is not expected to mean that something is really wrong if the 
timer expires, but rather means that nginx shouldn't try to process 
remaining requests, but should instead close them as long as it can do 
so. That is, worker_shutdown_timeout does not fit semantically. 
Further, worker_shutdown_timeout is not set by default, so it simply 
cannot be used.

The 1 minute was chosen as it matches the default send_timeout, which 
typically accompanies AIO operations when sending responses (and also 
delays shutdown, so no "open socket left" alerts are normally seen). 
Still, send_timeout is also quite different semantically, and therefore 
a hardcoded value is used instead.

I don't think there are valid cases when AIO operations can take longer 
than 1 minute, as these are considered to be small and fast operations, 
which are normally done synchronously within the nginx event loop when 
not using AIO, and an operation which takes 1m would mean nginx is 
completely unresponsive. Still, if we find out that 1 minute is not 
large enough in some cases, we can just bump it to a larger value.

[...]

-- 
Maxim Dounin
http://mdounin.ru/

From xeioex at nginx.com  Tue Jan 9 06:16:17 2024
From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=)
Date: Tue, 09 Jan 2024 06:16:17 +0000
Subject: [njs] Ignoring UndefinedBehaviorSanitizer warnings where appropriate.
Message-ID: 

details:   https://hg.nginx.org/njs/rev/0490f1ae4cf5
branches:  
changeset: 2258:0490f1ae4cf5
user:      Dmitry Volyntsev
date:      Sun Jul 30 10:21:51 2023 +0100
description:
Ignoring UndefinedBehaviorSanitizer warnings where appropriate.

Prodded by David Carlier and Ben Kallus.

diffstat:

 auto/clang            |  11 +++++++++++
 src/njs_clang.h       |  25 +++++++++++++++++++++++++
 src/njs_number.c      |   4 +++-
 src/njs_number.h      |   2 +-
 src/njs_typed_array.c |   2 +-
 5 files changed, 41 insertions(+), 3 deletions(-)

diffs (94 lines):

diff -r 275d785ab5bf -r 0490f1ae4cf5 auto/clang
--- a/auto/clang	Mon Jan 08 16:40:42 2024 -0800
+++ b/auto/clang	Sun Jul 30 10:21:51 2023 +0100
@@ -161,6 +161,17 @@ njs_feature_test="int main(int argc, cha
 . auto/feature
 
 
+njs_feature="GCC __attribute__ no_sanitize"
+njs_feature_name=NJS_HAVE_GCC_ATTRIBUTE_NO_SANITIZE
+njs_feature_run=no
+njs_feature_path=
+njs_feature_libs=
+njs_feature_test="__attribute__((no_sanitize(\"undefined\"))) int main(void) {
+                      return 0;
+                  }"
+.
auto/feature + + njs_feature="Address sanitizer" njs_feature_name=NJS_HAVE_ADDRESS_SANITIZER njs_feature_run=no diff -r 275d785ab5bf -r 0490f1ae4cf5 src/njs_clang.h --- a/src/njs_clang.h Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_clang.h Sun Jul 30 10:21:51 2023 +0100 @@ -183,6 +183,31 @@ njs_leading_zeros64(uint64_t x) #define njs_msan_unpoison(ptr, size) #endif +#if (NJS_HAVE_GCC_ATTRIBUTE_NO_SANITIZE) +#define NJS_NOSANITIZE(options) __attribute__((no_sanitize(options))) +#else +#define NJS_NOSANITIZE(options) +#endif + + +njs_inline NJS_NOSANITIZE("float-cast-overflow") int64_t +njs_unsafe_cast_double_to_int64(double num) +{ + /* + * Casting NaN to integer is undefined behavior, + * but it is fine in some cases where we do additional checks later. + * For example: + * int64_t i64 = njs_unsafe_cast_double_to_int64(num); + * if (i64 == num) { + * // num is integer + * } + * + * We do this as inline function to avoid UndefinedBehaviorSanitizer + * warnings. + */ + return (int64_t) num; +} + #if (NJS_HAVE_DENORMALS_CONTROL) #include diff -r 275d785ab5bf -r 0490f1ae4cf5 src/njs_number.c --- a/src/njs_number.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_number.c Sun Jul 30 10:21:51 2023 +0100 @@ -382,7 +382,9 @@ njs_number_is_safe_integer(njs_vm_t *vm, if (nargs > 1 && njs_is_number(&args[1])) { num = njs_number(&args[1]); - if (num == (int64_t) num && fabs(num) <= NJS_MAX_SAFE_INTEGER) { + if (num == njs_unsafe_cast_double_to_int64(num) + && fabs(num) <= NJS_MAX_SAFE_INTEGER) + { integer = 1; } } diff -r 275d785ab5bf -r 0490f1ae4cf5 src/njs_number.h --- a/src/njs_number.h Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_number.h Sun Jul 30 10:21:51 2023 +0100 @@ -36,7 +36,7 @@ njs_int_t njs_number_parse_float(njs_vm_ njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); -njs_inline njs_bool_t +njs_inline NJS_NOSANITIZE("float-cast-overflow") njs_bool_t njs_number_is_integer_index(double num) { uint32_t u32; diff -r 275d785ab5bf -r 0490f1ae4cf5 src/njs_typed_array.c --- a/src/njs_typed_array.c Mon Jan 08 16:40:42 2024 -0800 +++ b/src/njs_typed_array.c Sun Jul 30 10:21:51 2023 +0100 @@ -1388,7 +1388,7 @@ njs_typed_array_prototype_index_of(njs_v v = njs_number(njs_argument(args, 1)); - i64 = v; + i64 = njs_unsafe_cast_double_to_int64(v); integer = (v == i64); buffer = array->buffer; From arut at nginx.com Tue Jan 9 13:14:07 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 9 Jan 2024 17:14:07 +0400 Subject: [PATCH 2 of 6] Overhauled some diagnostic messages missed in 1b05b9bbcebf In-Reply-To: References: Message-ID: <20240109131407.d6sz6d3rzscsuvwc@N00W24XTQX> Hi, On Fri, Dec 15, 2023 at 07:37:45PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1702647536 -14400 > # Fri Dec 15 17:38:56 2023 +0400 > # Node ID de11f5373157db6c1e22dbad2ab4014143a5e8f8 > # Parent cb377d36446e1ce22b71848a4a138564b2e38719 > Overhauled some diagnostic messages missed in 1b05b9bbcebf. The commit dates back to 2011 when Stream did not exist yet. So technically saying "missed" is not quite correct. But since Stream is copied from Mail, that's ok. 
> diff --git a/src/http/modules/ngx_http_referer_module.c b/src/http/modules/ngx_http_referer_module.c > --- a/src/http/modules/ngx_http_referer_module.c > +++ b/src/http/modules/ngx_http_referer_module.c > @@ -631,7 +631,7 @@ ngx_http_add_regex_referer(ngx_conf_t *c > #else > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "the using of the regex \"%V\" requires PCRE library", > + "using regex \"%V\" requires PCRE library", > name); > > return NGX_ERROR; > diff --git a/src/http/modules/ngx_http_ssi_filter_module.c b/src/http/modules/ngx_http_ssi_filter_module.c > --- a/src/http/modules/ngx_http_ssi_filter_module.c > +++ b/src/http/modules/ngx_http_ssi_filter_module.c > @@ -2001,7 +2001,7 @@ ngx_http_ssi_regex_match(ngx_http_reques > #else > > ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, > - "the using of the regex \"%V\" in SSI requires PCRE library", > + "using regex \"%V\" in SSI requires PCRE library", > pattern); > return NGX_HTTP_SSI_ERROR; > > diff --git a/src/mail/ngx_mail_core_module.c b/src/mail/ngx_mail_core_module.c > --- a/src/mail/ngx_mail_core_module.c > +++ b/src/mail/ngx_mail_core_module.c > @@ -441,7 +441,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx > continue; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "bind ipv6only is not supported " > + "ipv6only is not supported " > "on this platform"); > return NGX_CONF_ERROR; > #endif > @@ -564,7 +564,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx > } > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "the invalid \"%V\" parameter", &value[i]); > + "invalid \"%V\" parameter", &value[i]); > return NGX_CONF_ERROR; > } > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -1008,7 +1008,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > continue; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "bind ipv6only is not supported " > + "ipv6only is not supported " > "on this platform"); > return NGX_CONF_ERROR; > #endif > @@ -1136,7 +1136,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > } > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "the invalid \"%V\" parameter", &value[i]); > + "invalid \"%V\" parameter", &value[i]); > return NGX_CONF_ERROR; > } > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks fine From arut at nginx.com Tue Jan 9 13:17:18 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 9 Jan 2024 17:17:18 +0400 Subject: [PATCH 3 of 6] Stream: reshuffled ngx_stream_listen_opt_t fields In-Reply-To: <4d90cb223fdb9e3e6c14.1702654666@enoparse.local> References: <4d90cb223fdb9e3e6c14.1702654666@enoparse.local> Message-ID: <20240109131718.b4bzfjojnl6ymowc@N00W24XTQX> Hi, On Fri, Dec 15, 2023 at 07:37:46PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1702648226 -14400 > # Fri Dec 15 17:50:26 2023 +0400 > # Node ID 4d90cb223fdb9e3e6c148726e36cec7835b2f0f8 > # Parent de11f5373157db6c1e22dbad2ab4014143a5e8f8 > Stream: reshuffled ngx_stream_listen_opt_t fields. > > In preparation for adding more parameters to the listen directive, > and to be in sync with the corresponding structure in the http module. > No functional changes. 
> > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -56,18 +56,19 @@ typedef struct { > unsigned reuseport:1; > unsigned so_keepalive:2; > unsigned proxy_protocol:1; > + > + int backlog; > + int rcvbuf; > + int sndbuf; > + int type; > +#if (NGX_HAVE_TCP_FASTOPEN) > + int fastopen; > +#endif > #if (NGX_HAVE_KEEPALIVE_TUNABLE) > int tcp_keepidle; > int tcp_keepintvl; > int tcp_keepcnt; > #endif > - int backlog; > - int rcvbuf; > - int sndbuf; > -#if (NGX_HAVE_TCP_FASTOPEN) > - int fastopen; > -#endif > - int type; > } ngx_stream_listen_opt_t; > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks fine From jordanc.carter at outlook.com Tue Jan 9 15:01:31 2024 From: jordanc.carter at outlook.com (J Carter) Date: Tue, 9 Jan 2024 15:01:31 +0000 Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162) In-Reply-To: References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru> Message-ID: Hello, On Tue, 9 Jan 2024 08:59:14 +0300 Maxim Dounin wrote: > Hello! > > On Mon, Jan 08, 2024 at 01:31:11PM +0000, J Carter wrote: > > > On Mon, 8 Jan 2024 11:25:55 +0000 > > J Carter wrote: > > > > > Hello, > > > > > > On Mon, 27 Nov 2023 05:50:27 +0300 > > > Maxim Dounin wrote: > > > > > > > # HG changeset patch > > > > # User Maxim Dounin > > > > # Date 1701050170 -10800 > > > > # Mon Nov 27 04:56:10 2023 +0300 > > > > # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2 > > > > # Parent 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51 > > > > AIO operations now add timers (ticket #2162). > > > > > > > > Each AIO (thread IO) operation being run is now accompanied with 1-minute > > > > timer. This timer prevents unexpected shutdown of the worker process while > > > > an AIO operation is running, and logs an alert if the operation is running > > > > for too long. > > > > > > Shouldn't this timer's duration be set to match worker_shutdown_timeout's > > > duration rather than being hard coded to 60s ? > > > > Ah nevermind, I understand. > > > > These timers will either expire from passing the 60s set duration, or > > will expire as worker_process_timeout itself expires, kills the > > connection and times out associated timers (including the aio timers). > > > > Setting it to worker_shutdown_timeout's duration would be pointless > > (an 'infinite' timer would give the same result). > > > > So the only situation in which a different value for these AIO > > timers would make sense is if these AIO operations are expected to > > take longer 60s, but less than worker_shutdown_timeout (in cases > > where it has been increased from it's own default of 60s). > > > > In that case the AIO operation's timeout would have to be one > > (or more) of it's own directives, with a value less than > > worker_shutdown_timeout. > > Not really. > > When worker_shutdown_timeout expires, it tries to terminate the > request, but it can't as long as an AIO operation is running. > When the AIO operation completes, the request will be actually > terminated and the worker process will be allowed to exit. So far > so good. > > But if the AIO operation never completes, the timer will expire > after 1 minute, will log an alert, and the worker processes will > be anyway allowed to exit (with the request still present). 
> This might not be actually possible though - for example,
> ngx_thread_pool_exit_worker() will just block waiting for all
> pending operations to complete.
> 
> In theory, the situation when an AIO operation never completes
> should never happen, and just a counter of pending AIO
> operations can be used instead to delay shutdown (which is
> essentially equivalent to an infinite timer).
> 
> In practice, though, using a large enough guard timer might be
> better: it provides additional level of protection against bugs or
> system misbehaviour, and at least logs an alert if something
> really weird happens. It also looks more universal and in line
> with the current approach of using existing timers as an indicator
> that something is going on and shutdown should be delayed.
> 
> The timer is expected to be "large enough", since we cannot do
> anything meaningful with an AIO operation which never completes,
> we can only complain loudly, so the timer should never expire
> unless there is something really wrong. This is not the case with
> worker_shutdown_timeout: it can be set to an arbitrarily low value,
> which is not expected to mean that something is really wrong if
> the timer expires, but rather means that nginx shouldn't try to
> process remaining requests, but should instead close them as long
> as it can do so. That is, worker_shutdown_timeout does not fit
> semantically. Further, worker_shutdown_timeout is not set by
> default, so it simply cannot be used.

Good point, for whatever reason I had it in my mind that it was set by 
default (and you're right it doesn't fit in any case).

> The 1 minute was chosen as it matches the default send_timeout, which
> typically accompanies AIO operations when sending responses (and
> also delays shutdown, so no "open socket left" alerts are normally
> seen). Still, send_timeout is also quite different semantically,
> and therefore a hardcoded value is used instead.
> 
> I don't think there are valid cases when AIO operations can take
> longer than 1 minute, as these are considered to be small and fast
> operations, which are normally done synchronously within the nginx
> event loop when not using AIO, and an operation which takes 1m
> would mean nginx is completely unresponsive. Still, if we find
> out that 1 minute is not large enough in some cases,
> we can just bump it to a larger value.
> 
> [...]

Thanks for the detailed explanation - makes sense to me.

From arut at nginx.com  Tue Jan 9 15:39:35 2024
From: arut at nginx.com (Roman Arutyunyan)
Date: Tue, 9 Jan 2024 19:39:35 +0400
Subject: [PATCH 4 of 6] Stream: the "deferred" parameter of the "listen" directive
In-Reply-To: 
References: 
Message-ID: <20240109153935.syjw55gwb63t6hoa@N00W24XTQX>

Hi,

On Fri, Dec 15, 2023 at 07:37:47PM +0400, Sergey Kandaurov wrote:

> # HG changeset patch
> # User Sergey Kandaurov
> # Date 1702650289 -14400
> #      Fri Dec 15 18:24:49 2023 +0400
> # Node ID cca722e447f8beaaa6b41a620c8b4239a5d1aa7d
> # Parent  4d90cb223fdb9e3e6c148726e36cec7835b2f0f8
> Stream: the "deferred" parameter of the "listen" directive.
> 
> The Linux TCP_DEFER_ACCEPT support.
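For context, TCP_DEFER_ACCEPT makes the kernel complete accept() only 
once the client has actually sent data. What the new "deferred" 
parameter ultimately arranges boils down to a setsockopt() call on the 
listening socket; a minimal standalone sketch (not code from the patch, 
Linux only):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Wait up to "timeout" seconds for the first data segment before
     * accept() returns the connection to the application. */
    static int
    set_defer_accept(int listen_fd, int timeout)
    {
        return setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                          &timeout, sizeof(int));
    }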
> > diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > --- a/src/stream/ngx_stream.c > +++ b/src/stream/ngx_stream.c > @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, > ls->keepcnt = addr->opt.tcp_keepcnt; > #endif > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + ls->deferred_accept = addr->opt.deferred_accept; > +#endif > + > #if (NGX_HAVE_INET6) > ls->ipv6only = addr->opt.ipv6only; > #endif > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -53,6 +53,7 @@ typedef struct { > #if (NGX_HAVE_INET6) > unsigned ipv6only:1; > #endif > + unsigned deferred_accept:1; > unsigned reuseport:1; > unsigned so_keepalive:2; > unsigned proxy_protocol:1; > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -987,6 +987,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > continue; > } > > + if (ngx_strcmp(value[i].data, "deferred") == 0) { > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + lsopt.deferred_accept = 1; > + lsopt.set = 1; > + lsopt.bind = 1; > +#else > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "the deferred accept is not supported " > + "on this platform, ignored"); > +#endif > + continue; > + } > + > if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { > #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) > if (ngx_strcmp(&value[i].data[10], "n") == 0) { We should trigger an error if this option (TCP_DEFER_ACCEPT) is set for UDP. We have a block "if (lsopt.type == SOCK_DGRAM) {}" later in this function. -- Roman Arutyunyan From benjamin.p.kallus.gr at dartmouth.edu Tue Jan 9 16:18:06 2024 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Tue, 9 Jan 2024 16:18:06 +0000 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: > This demonstrates that your patch > is clearly insufficient. Further, Vladimir's patch is clearly > insufficient too, as shown for the another patch in the same > patch series. "Insufficient" only when compared to a hypothetical perfectly exhaustive patch that requires "huge work," as you put it. It's best not to let the perfect be the enemy of the good. Avoiding UB in normal program execution (as opposed to the test suite) will prevent common workloads from executing UB, which is not merely an issue of "theoretical correctness." See https://blog.regehr.org/archives/213 (section "A Fun Case Analysis") for an example of how this "NULL used in nonnull context" issue leads to unexpected program behavior. Thus, I think the best approach is to patch pstrdup to avoid memcpy-from-NULL, and patch other functions only if someone can present a backtrace from a real configuration of nginx that executed UB. -Ben -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Tue Jan 9 17:31:18 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 17:31:18 +0000 Subject: [njs] Avoiding pointer wraparound for padded integer specifier. Message-ID: details: https://hg.nginx.org/njs/rev/e2c6451435a0 branches: changeset: 2259:e2c6451435a0 user: Dmitry Volyntsev date: Mon Jan 08 22:19:59 2024 -0800 description: Avoiding pointer wraparound for padded integer specifier. 
Previously, when integer was larger than the padded width in a integer specifier, the "end" pointer was evaluated to a value before "buf" pointer. Found by UndefinedBehaviorSanitizer. diffstat: src/njs_sprintf.c | 9 ++++----- 1 files changed, 4 insertions(+), 5 deletions(-) diffs (28 lines): diff -r 0490f1ae4cf5 -r e2c6451435a0 src/njs_sprintf.c --- a/src/njs_sprintf.c Sun Jul 30 10:21:51 2023 +0100 +++ b/src/njs_sprintf.c Mon Jan 08 22:19:59 2024 -0800 @@ -522,12 +522,12 @@ njs_integer(njs_sprintf_t *spf, u_char * } while (ui64 != 0); } + length = (temp + NJS_INT64_T_LEN) - p; + /* Zero or space padding. */ - if (spf->width != 0) { - - length = (temp + NJS_INT64_T_LEN) - p; - end = buf + (spf->width - length); + if (length < spf->width) { + end = buf + spf->width - length; end = njs_min(end, spf->end); while (buf < end) { @@ -537,7 +537,6 @@ njs_integer(njs_sprintf_t *spf, u_char * /* Number copying. */ - length = (temp + NJS_INT64_T_LEN) - p; end = buf + length; end = njs_min(end, spf->end); From xeioex at nginx.com Tue Jan 9 17:31:20 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 17:31:20 +0000 Subject: [njs] Avoiding casting NaN value to int64_t in njs.dump(). Message-ID: details: https://hg.nginx.org/njs/rev/c15a6129ade7 branches: changeset: 2260:c15a6129ade7 user: Dmitry Volyntsev date: Mon Jan 08 22:20:10 2024 -0800 description: Avoiding casting NaN value to int64_t in njs.dump(). Found by UndefinedBehaviorSanitizer. diffstat: src/njs_json.c | 10 ++++++---- 1 files changed, 6 insertions(+), 4 deletions(-) diffs (41 lines): diff -r e2c6451435a0 -r c15a6129ade7 src/njs_json.c --- a/src/njs_json.c Mon Jan 08 22:19:59 2024 -0800 +++ b/src/njs_json.c Mon Jan 08 22:20:10 2024 -0800 @@ -1894,7 +1894,7 @@ njs_dump_visited(njs_vm_t *vm, njs_json_ } -njs_inline njs_bool_t +njs_inline void njs_dump_empty(njs_json_stringify_t *stringify, njs_json_state_t *state, njs_chb_t *chain, njs_bool_t sep_position) { @@ -1902,7 +1902,7 @@ njs_dump_empty(njs_json_stringify_t *str int64_t diff; if (!state->array) { - return 0; + return; } if (sep_position) { @@ -1919,6 +1919,10 @@ njs_dump_empty(njs_json_stringify_t *str } } + if (isnan(prev)) { + return; + } + if (isnan(key)) { key = state->length; } @@ -1947,8 +1951,6 @@ njs_dump_empty(njs_json_stringify_t *str njs_json_stringify_indent(stringify, chain, 1); } } - - return 1; } From xeioex at nginx.com Tue Jan 9 17:31:22 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 17:31:22 +0000 Subject: [njs] Avoiding arithmetic operations with NULL pointer in TextDecoder(). Message-ID: details: https://hg.nginx.org/njs/rev/5d2a3da0674f branches: changeset: 2261:5d2a3da0674f user: Dmitry Volyntsev date: Mon Jan 08 22:20:19 2024 -0800 description: Avoiding arithmetic operations with NULL pointer in TextDecoder(). Found by UndefinedBehaviorSanitizer. diffstat: src/njs_encoding.c | 2 +- src/njs_utf8.c | 30 ++++++++++++++++-------------- 2 files changed, 17 insertions(+), 15 deletions(-) diffs (57 lines): diff -r c15a6129ade7 -r 5d2a3da0674f src/njs_encoding.c --- a/src/njs_encoding.c Mon Jan 08 22:20:10 2024 -0800 +++ b/src/njs_encoding.c Mon Jan 08 22:20:19 2024 -0800 @@ -543,7 +543,7 @@ njs_text_decoder_decode(njs_vm_t *vm, nj /* Looking for BOM. 
*/ - if (!data->ignore_bom) { + if (start != NULL && !data->ignore_bom) { start += njs_utf8_bom(start, end); } diff -r c15a6129ade7 -r 5d2a3da0674f src/njs_utf8.c --- a/src/njs_utf8.c Mon Jan 08 22:20:10 2024 -0800 +++ b/src/njs_utf8.c Mon Jan 08 22:20:19 2024 -0800 @@ -361,25 +361,27 @@ njs_utf8_stream_length(njs_unicode_decod size = 0; length = 0; - end = p + len; + if (p != NULL) { + end = p + len; + + while (p < end) { + codepoint = njs_utf8_decode(ctx, &p, end); - while (p < end) { - codepoint = njs_utf8_decode(ctx, &p, end); + if (codepoint > NJS_UNICODE_MAX_CODEPOINT) { + if (codepoint == NJS_UNICODE_CONTINUE) { + break; + } - if (codepoint > NJS_UNICODE_MAX_CODEPOINT) { - if (codepoint == NJS_UNICODE_CONTINUE) { - break; + if (fatal) { + return -1; + } + + codepoint = NJS_UNICODE_REPLACEMENT; } - if (fatal) { - return -1; - } - - codepoint = NJS_UNICODE_REPLACEMENT; + size += njs_utf8_size(codepoint); + length++; } - - size += njs_utf8_size(codepoint); - length++; } if (last && ctx->need != 0x00) { From xeioex at nginx.com Tue Jan 9 17:31:24 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 17:31:24 +0000 Subject: [njs] Avoiding casting Infinity to integer in String.fromCodePoint(). Message-ID: details: https://hg.nginx.org/njs/rev/f4cb0dc3e8ea branches: changeset: 2262:f4cb0dc3e8ea user: Dmitry Volyntsev date: Mon Jan 08 22:21:14 2024 -0800 description: Avoiding casting Infinity to integer in String.fromCodePoint(). Found by UndefinedBehaviorSanitizer. diffstat: src/njs_string.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 5d2a3da0674f -r f4cb0dc3e8ea src/njs_string.c --- a/src/njs_string.c Mon Jan 08 22:20:19 2024 -0800 +++ b/src/njs_string.c Mon Jan 08 22:21:14 2024 -0800 @@ -1617,7 +1617,7 @@ njs_string_from_char_code(njs_vm_t *vm, if (is_point) { num = njs_number(&args[i]); - if (isnan(num)) { + if (isnan(num) || isinf(num)) { goto range_error; } From xeioex at nginx.com Tue Jan 9 17:31:26 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 09 Jan 2024 17:31:26 +0000 Subject: [njs] Avoiding arithmetic ops with NULL in %TypedArray%.prototype.sort(). Message-ID: details: https://hg.nginx.org/njs/rev/50c587f74a09 branches: changeset: 2263:50c587f74a09 user: Dmitry Volyntsev date: Tue Jan 09 09:14:42 2024 -0800 description: Avoiding arithmetic ops with NULL in %TypedArray%.prototype.sort(). Found by UndefinedBehaviorSanitizer. 
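The rule behind this and the preceding fixes reduces to a few lines (an 
editorial illustration, not code from the changeset):

    #include <stddef.h>

    /* Computing "p + n" is undefined when p == NULL, even for n == 0,
     * so end pointers for possibly-empty arrays need an explicit
     * guard, mirroring the checks added in these patches. */
    static const char *
    range_end_sketch(const char *p, size_t n)
    {
        if (p == NULL) {
            return NULL;
        }

        return p + n;   /* well-defined: p points into a real object */
    }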
diffstat: src/njs_typed_array.c | 9 +++++---- 1 files changed, 5 insertions(+), 4 deletions(-) diffs (26 lines): diff -r f4cb0dc3e8ea -r 50c587f74a09 src/njs_typed_array.c --- a/src/njs_typed_array.c Mon Jan 08 22:21:14 2024 -0800 +++ b/src/njs_typed_array.c Tue Jan 09 09:14:42 2024 -0800 @@ -2035,6 +2035,11 @@ njs_typed_array_prototype_sort(njs_vm_t } njs_qsort(base, length, element_size, cmp, &ctx); + + if (njs_slow_path(ctx.exception)) { + return NJS_ERROR; + } + if (ctx.function != NULL) { if (&buffer->u.u8[array->offset * element_size] == orig) { memcpy(orig, base, length * element_size); @@ -2043,10 +2048,6 @@ njs_typed_array_prototype_sort(njs_vm_t njs_mp_free(vm->mem_pool, base); } - if (njs_slow_path(ctx.exception)) { - return NJS_ERROR; - } - njs_set_typed_array(retval, array); return NJS_OK; From xeioex at nginx.com Tue Jan 9 17:40:26 2024 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 9 Jan 2024 09:40:26 -0800 Subject: [PATCH] Satisfy UBSan in njs In-Reply-To: References: Message-ID: <975b64e9-d98e-49f7-9d4c-99400f0c9041@nginx.com> On 1/3/24 4:55 PM, Ben Kallus wrote: > When I run my nginx+njs application with UBSan enabled, I encounter a > few instances of undefined behavior in njs: > > 1. A memcpy from NULL > 2. A couple of offsets applied to NULL > 3. A u32 assigned to nan > 4. A u32 assigned to inf > > This patch adds checks to prevent these undefined operations. With it, > my application no longer has any UBSan alerts. Hi Ben, I did a bunch of patches related to UBSan in njs core, most notably https://hg.nginx.org/njs/rev/0490f1ae4cf5. Now unit tests and test262 pass without warnings. Thank you for prodding. > > # HG changeset patch > # User Ben Kallus > # Date 1704329280 18000 > # Wed Jan 03 19:48:00 2024 -0500 > # Node ID 85d5846984fc2731ad74f91f21c74be67d6974a9 > # Parent 4a15613f4e8bb4a8349ee1cefbae07585da4cbc6 > Prevent undefined operations on NULL, INF, and NAN > > diff -r 4a15613f4e8b -r 85d5846984fc nginx/ngx_http_js_module.c > --- a/nginx/ngx_http_js_module.c Tue Dec 19 12:37:05 2023 -0800 > +++ b/nginx/ngx_http_js_module.c Wed Jan 03 19:48:00 2024 -0500 > @@ -2717,7 +2717,9 @@ > > for ( /* void */ ; cl; cl = cl->next) { > buf = cl->buf; > - p = ngx_cpymem(p, buf->pos, buf->last - buf->pos); > + if (buf->last - buf->pos > 0) { > + p = ngx_cpymem(p, buf->pos, buf->last - buf->pos); > + } > } > > done: > diff -r 4a15613f4e8b -r 85d5846984fc src/njs_extern.c > --- a/src/njs_extern.c Tue Dec 19 12:37:05 2023 -0800 > +++ b/src/njs_extern.c Wed Jan 03 19:48:00 2024 -0500 > @@ -38,7 +38,10 @@ > lhq.proto = &njs_object_hash_proto; > lhq.pool = vm->mem_pool; > > - end = external + n; > + end = external; > + if (n > 0) { > + end += n; > + } > > while (external < end) { > > diff -r 4a15613f4e8b -r 85d5846984fc src/njs_number.h > --- a/src/njs_number.h Tue Dec 19 12:37:05 2023 -0800 > +++ b/src/njs_number.h Wed Jan 03 19:48:00 2024 -0500 > @@ -41,6 +41,10 @@ > { > uint32_t u32; > > + if (isnan(num) || isinf(num)) { > + return 0; > + } > + > u32 = num; > > return (u32 == num && u32 != 0xffffffff); > diff -r 4a15613f4e8b -r 85d5846984fc src/njs_object.c > --- a/src/njs_object.c Tue Dec 19 12:37:05 2023 -0800 > +++ b/src/njs_object.c Wed Jan 03 19:48:00 2024 -0500 > @@ -598,7 +598,10 @@ > start = array->start; > > p = start; > - end = p + array->length; > + end = p; > + if (array->length > 0) { > + end += array->length; > + } > > switch (kind) { > case NJS_ENUM_KEYS: > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org 
> https://mailman.nginx.org/mailman/listinfo/nginx-devel

From mdounin at mdounin.ru  Tue Jan 9 19:24:11 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Tue, 9 Jan 2024 22:24:11 +0300
Subject: Core: Avoid memcpy from NULL
In-Reply-To: 
References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de>
Message-ID: 

Hello!

On Tue, Jan 09, 2024 at 04:18:06PM +0000, Ben Kallus wrote:

> > This demonstrates that your patch
> > is clearly insufficient. Further, Vladimir's patch is clearly
> > insufficient too, as shown for the another patch in the same
> > patch series.
> 
> "Insufficient" only when compared to a hypothetical perfectly exhaustive
> patch that requires "huge work," as you put it. It's best not to let the
> perfect be the enemy of the good.
> 
> Avoiding UB in normal program execution (as opposed to the test suite) will
> prevent common workloads from executing UB, which is not merely an issue of
> "theoretical correctness." See https://blog.regehr.org/archives/213
> (section "A Fun Case Analysis") for an example of how this "NULL used in
> nonnull context" issue leads to unexpected program behavior.
> 
> Thus, I think the best approach is to patch pstrdup to avoid
> memcpy-from-NULL, and patch other functions only if someone can present a
> backtrace from a real configuration of nginx that executed UB.

Thank you for your opinion.

As I tried to explain in the review of Vladimir's patches, fixing 
scattered sanitizer reports individually, assuming no direct impact 
is present, has an obvious downside: as long as there is a consistent 
coding pattern which causes such reports, fixing individual reports 
will hide the pattern from being seen by the sanitizer, but won't 
eliminate it. As such, it will greatly reduce pressure on fixing the 
pattern, but if the pattern is indeed practically dangerous and has 
security consequences, it will be trivial for an attacker to find out 
cases which are not fixed and exploit them.

As such, I prefer to identify patterns and fix them consistently over 
the code base instead of trying to quench individual reports.

Quenching individual reports makes sense if we don't want to fix the 
pattern, assuming it is completely harmless anyway, but rather want to 
simplify usage of the sanitizer to identify other issues. This does 
not look like what you are advocating for, though. (Also, again, 
patching just ngx_pstrdup() is clearly not enough even for this, see 
Vladimir's patch for a list of other places reported by UBSan in 
perfectly real configurations.)

As already pointed out previously, there are no known cases when 
memcpy(p, NULL, 0) can result in miscompilation of nginx code, as 
nginx usually does not check string data pointers against NULL (and 
instead checks length, if needed). In particular, the ngx_pstrdup() 
you are trying to patch doesn't. That is, this is exactly the "no 
direct impact" situation as assumed above.

If you think there are cases when the code can be miscompiled in 
practice, and not theoretically, please share.

-- 
Maxim Dounin
http://mdounin.ru/

From xeioex at nginx.com  Wed Jan 10 02:27:52 2024
From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=)
Date: Wed, 10 Jan 2024 02:27:52 +0000
Subject: [njs] Fixed potential buffer overread in String.prototype.match().
Message-ID: 

details:   https://hg.nginx.org/njs/rev/476f7b3e617d
branches:  
changeset: 2264:476f7b3e617d
user:      Dmitry Volyntsev
date:      Tue Jan 09 17:56:19 2024 -0800
description:
Fixed potential buffer overread in String.prototype.match().
diffstat: src/njs_string.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 50c587f74a09 -r 476f7b3e617d src/njs_string.c --- a/src/njs_string.c Tue Jan 09 09:14:42 2024 -0800 +++ b/src/njs_string.c Tue Jan 09 17:56:19 2024 -0800 @@ -2797,7 +2797,7 @@ njs_string_prototype_match(njs_vm_t *vm, match: - return njs_regexp_prototype_exec(vm, arguments, nargs, unused, retval); + return njs_regexp_prototype_exec(vm, arguments, 2, unused, retval); } From v.zhestikov at f5.com Thu Jan 11 00:27:43 2024 From: v.zhestikov at f5.com (=?utf-8?q?Vadim_Zhestikov?=) Date: Thu, 11 Jan 2024 00:27:43 +0000 Subject: [njs] Removed unused field from njs_vmcode_t. Message-ID: details: https://hg.nginx.org/njs/rev/a5f279148c9f branches: changeset: 2265:a5f279148c9f user: Vadim Zhestikov date: Wed Jan 10 16:26:35 2024 -0800 description: Removed unused field from njs_vmcode_t. diffstat: src/njs_disassembler.c | 8 +- src/njs_generator.c | 161 ++++++++++++++++++++++++------------------------ src/njs_parser.c | 46 +++++++------- src/njs_parser.h | 2 +- src/njs_vmcode.c | 4 +- src/njs_vmcode.h | 12 +--- 6 files changed, 111 insertions(+), 122 deletions(-) diffs (939 lines): diff -r 476f7b3e617d -r a5f279148c9f src/njs_disassembler.c --- a/src/njs_disassembler.c Tue Jan 09 17:56:19 2024 -0800 +++ b/src/njs_disassembler.c Wed Jan 10 16:26:35 2024 -0800 @@ -9,7 +9,7 @@ typedef struct { - njs_vmcode_operation_t operation; + njs_vmcode_t operation; size_t size; njs_str_t name; } njs_code_name_t; @@ -191,6 +191,7 @@ njs_disassemble(u_char *start, u_char *e njs_str_t *name; njs_uint_t n; const char *type; + njs_vmcode_t operation; njs_code_name_t *code_name; njs_vmcode_jump_t *jump; njs_vmcode_error_t *error; @@ -203,7 +204,6 @@ njs_disassemble(u_char *start, u_char *e njs_vmcode_finally_t *finally; njs_vmcode_try_end_t *try_end; njs_vmcode_try_start_t *try_start; - njs_vmcode_operation_t operation; njs_vmcode_cond_jump_t *cond_jump; njs_vmcode_test_jump_t *test_jump; njs_vmcode_prop_next_t *prop_next; @@ -224,7 +224,7 @@ njs_disassemble(u_char *start, u_char *e p = start; while (((p < end) && (count == -1)) || (count-- > 0)) { - operation = *(njs_vmcode_operation_t *) p; + operation = *(njs_vmcode_t *) p; line = njs_lookup_line(lines, p - start); if (operation == NJS_VMCODE_ARRAY) { @@ -553,7 +553,7 @@ njs_disassemble(u_char *start, u_char *e njs_printf("%5uD | %05uz UNKNOWN %04Xz\n", line, p - start, (size_t) (uintptr_t) operation); - p += sizeof(njs_vmcode_operation_t); + p += sizeof(njs_vmcode_t); next: diff -r 476f7b3e617d -r a5f279148c9f src/njs_generator.c --- a/src/njs_generator.c Tue Jan 09 17:56:19 2024 -0800 +++ b/src/njs_generator.c Wed Jan 10 16:26:35 2024 -0800 @@ -376,7 +376,7 @@ static njs_int_t njs_generate_index_rele njs_generator_t *generator, njs_index_t index); -#define njs_generate_code(generator, type, _code, _op, nargs, nd) \ +#define njs_generate_code(generator, type, _code, _op, nd) \ do { \ _code = (type *) njs_generate_reserve(vm, generator, sizeof(type)); \ if (njs_slow_path(_code == NULL)) { \ @@ -391,15 +391,14 @@ static njs_int_t njs_generate_index_rele \ generator->code_end += sizeof(type); \ \ - _code->code.operation = _op; \ - _code->code.operands = 3 - nargs; \ + _code->code = _op; \ } while (0) #define njs_generate_code_jump(generator, _code, _offset) \ do { \ njs_generate_code(generator, njs_vmcode_jump_t, _code, \ - NJS_VMCODE_JUMP, 0, NULL); \ + NJS_VMCODE_JUMP, NULL); \ _code->offset = _offset; \ } while (0) @@ -407,7 +406,7 @@ static njs_int_t 
njs_generate_index_rele #define njs_generate_code_move(generator, _code, _dst, _src, node) \ do { \ njs_generate_code(generator, njs_vmcode_move_t, _code, \ - NJS_VMCODE_MOVE, 2, node); \ + NJS_VMCODE_MOVE, node); \ _code->dst = _dst; \ _code->src = _src; \ } while (0) @@ -909,7 +908,7 @@ njs_generate_name(njs_vm_t *vm, njs_gene if (var->function && var->type == NJS_VARIABLE_FUNCTION) { njs_generate_code(generator, njs_vmcode_function_copy_t, copy, - NJS_VMCODE_FUNCTION_COPY, 0, node); + NJS_VMCODE_FUNCTION_COPY, node); copy->function = &var->value; copy->retval = node->index; } @@ -923,7 +922,7 @@ njs_generate_name(njs_vm_t *vm, njs_gene if (scope->dest_disable) { njs_generate_code(generator, njs_vmcode_variable_t, variable, - NJS_VMCODE_NOT_INITIALIZED, 1, node); + NJS_VMCODE_NOT_INITIALIZED, node); variable->dst = node->index; } } @@ -961,7 +960,7 @@ njs_generate_variable(njs_vm_t *vm, njs_ if (var->function && var->type == NJS_VARIABLE_FUNCTION) { njs_generate_code(generator, njs_vmcode_function_copy_t, copy, - NJS_VMCODE_FUNCTION_COPY, 0, node); + NJS_VMCODE_FUNCTION_COPY, node); copy->function = &var->value; copy->retval = node->index; } @@ -975,7 +974,7 @@ njs_generate_variable(njs_vm_t *vm, njs_ if ((!scope->dest_disable && njs_function_scope(var->scope) == scope)) { njs_generate_code(generator, njs_vmcode_variable_t, variable, - NJS_VMCODE_NOT_INITIALIZED, 1, node); + NJS_VMCODE_NOT_INITIALIZED, node); variable->dst = node->index; } } @@ -1129,7 +1128,7 @@ njs_generate_let(njs_vm_t *vm, njs_gener njs_vmcode_variable_t *code; njs_generate_code(generator, njs_vmcode_variable_t, code, - NJS_VMCODE_LET, 0, node); + NJS_VMCODE_LET, node); code->dst = var->index; return NJS_OK; @@ -1167,7 +1166,7 @@ njs_generate_if_statement_cond(njs_vm_t njs_vmcode_cond_jump_t *cond_jump; njs_generate_code(generator, njs_vmcode_cond_jump_t, cond_jump, - NJS_VMCODE_IF_FALSE_JUMP, 2, node); + NJS_VMCODE_IF_FALSE_JUMP, node); cond_jump->cond = node->left->index; ret = njs_generate_node_index_release(vm, generator, node->left); @@ -1278,7 +1277,7 @@ njs_generate_cond_expression_handler(njs njs_vmcode_cond_jump_t *cond_jump; njs_generate_code(generator, njs_vmcode_cond_jump_t, cond_jump, - NJS_VMCODE_IF_FALSE_JUMP, 2, node); + NJS_VMCODE_IF_FALSE_JUMP, node); jump_offset = njs_code_offset(generator, cond_jump); cond_jump->cond = node->left->index; @@ -1466,7 +1465,7 @@ njs_generate_switch_case_after(njs_vm_t node = branch->right; njs_generate_code(generator, njs_vmcode_equal_jump_t, equal, - NJS_VMCODE_IF_EQUAL_JUMP, 3, branch); + NJS_VMCODE_IF_EQUAL_JUMP, branch); equal->offset = offsetof(njs_vmcode_equal_jump_t, offset); equal->value1 = ctx->index; equal->value2 = node->left->index; @@ -1674,7 +1673,7 @@ njs_generate_while_end(njs_vm_t *vm, njs ctx = generator->context; njs_generate_code(generator, njs_vmcode_cond_jump_t, cond_jump, - NJS_VMCODE_IF_TRUE_JUMP, 2, node->right); + NJS_VMCODE_IF_TRUE_JUMP, node->right); cond_jump->offset = ctx->loop_offset - njs_code_offset(generator, cond_jump); cond_jump->cond = node->right->index; @@ -1741,7 +1740,7 @@ njs_generate_do_while_end(njs_vm_t *vm, ctx = generator->context; njs_generate_code(generator, njs_vmcode_cond_jump_t, cond_jump, - NJS_VMCODE_IF_TRUE_JUMP, 2, node->right); + NJS_VMCODE_IF_TRUE_JUMP, node->right); cond_jump->offset = ctx->loop_offset - njs_code_offset(generator, cond_jump); cond_jump->cond = node->right->index; @@ -1918,7 +1917,7 @@ njs_generate_for_end(njs_vm_t *vm, njs_g if (condition != NULL) { njs_generate_code(generator, 
njs_vmcode_cond_jump_t, cond_jump, - NJS_VMCODE_IF_TRUE_JUMP, 2, condition); + NJS_VMCODE_IF_TRUE_JUMP, condition); cond_jump->offset = ctx->loop_offset - njs_code_offset(generator, cond_jump); cond_jump->cond = condition->index; @@ -1964,7 +1963,7 @@ njs_generate_for_let_update(njs_vm_t *vm if (ref->variable->closure) { njs_generate_code(generator, njs_vmcode_variable_t, code_var, - NJS_VMCODE_LET_UPDATE, 0, let); + NJS_VMCODE_LET_UPDATE, let); code_var->dst = let->left->index; } @@ -2088,7 +2087,7 @@ njs_generate_for_in_body_wo_decl(njs_vm_ ctx->jump_offset); njs_generate_code(generator, njs_vmcode_prop_next_t, prop_next, - NJS_VMCODE_PROPERTY_NEXT, 3, node->left->left); + NJS_VMCODE_PROPERTY_NEXT, node->left->left); prop_offset = njs_code_offset(generator, prop_next); prop_next->retval = ctx->index_next_value; prop_next->object = foreach->right->index; @@ -2134,7 +2133,7 @@ njs_generate_for_in_object_wo_decl(njs_v } njs_generate_code(generator, njs_vmcode_prop_foreach_t, prop_foreach, - NJS_VMCODE_PROPERTY_FOREACH, 2, foreach); + NJS_VMCODE_PROPERTY_FOREACH, foreach); ctx->jump_offset = njs_code_offset(generator, prop_foreach); prop_foreach->object = foreach->right->index; @@ -2257,7 +2256,7 @@ njs_generate_for_in_object_left_hand_exp foreach = node->left; njs_generate_code(generator, njs_vmcode_prop_foreach_t, prop_foreach, - NJS_VMCODE_PROPERTY_FOREACH, 2, foreach); + NJS_VMCODE_PROPERTY_FOREACH, foreach); ctx->jump_offset = njs_code_offset(generator, prop_foreach); prop_foreach->object = foreach->right->index; @@ -2319,7 +2318,7 @@ njs_generate_for_in_set_prop_block(njs_v foreach = node->left; njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set, - NJS_VMCODE_PROPERTY_SET, 3, foreach); + NJS_VMCODE_PROPERTY_SET, foreach); prop_set->object = foreach->left->left->index; prop_set->property = foreach->left->right->index; prop_set->value = ctx->index_next_value; @@ -2348,7 +2347,7 @@ njs_generate_for_in_object(njs_vm_t *vm, } njs_generate_code(generator, njs_vmcode_prop_foreach_t, prop_foreach, - NJS_VMCODE_PROPERTY_FOREACH, 2, foreach); + NJS_VMCODE_PROPERTY_FOREACH, foreach); ctx->jump_offset = njs_code_offset(generator, prop_foreach); prop_foreach->object = foreach->right->index; @@ -2392,7 +2391,7 @@ njs_generate_for_in_body_left_hand_expr( ctx->jump_offset); njs_generate_code(generator, njs_vmcode_prop_next_t, prop_next, - NJS_VMCODE_PROPERTY_NEXT, 3, node->left->left); + NJS_VMCODE_PROPERTY_NEXT, node->left->left); prop_offset = njs_code_offset(generator, prop_next); prop_next->retval = ctx->index_next_value; prop_next->object = foreach->right->index; @@ -2450,7 +2449,7 @@ njs_generate_for_in_body(njs_vm_t *vm, n ctx->jump_offset); njs_generate_code(generator, njs_vmcode_prop_next_t, prop_next, - NJS_VMCODE_PROPERTY_NEXT, 3, node->left->left); + NJS_VMCODE_PROPERTY_NEXT, node->left->left); prop_offset = njs_code_offset(generator, prop_next); prop_next->retval = foreach->left->index; prop_next->object = foreach->right->index; @@ -2837,7 +2836,7 @@ njs_generate_debugger_statement(njs_vm_t njs_vmcode_debugger_t *debugger; njs_generate_code(generator, njs_vmcode_debugger_t, debugger, - NJS_VMCODE_DEBUGGER, 0, node); + NJS_VMCODE_DEBUGGER, node); debugger->retval = njs_generate_dest_index(vm, generator, node); if (njs_slow_path(debugger->retval == NJS_INDEX_ERROR)) { @@ -2870,7 +2869,7 @@ njs_generate_statement(njs_vm_t *vm, njs || var->type == NJS_VARIABLE_CONST)) { njs_generate_code(generator, njs_vmcode_variable_t, code, - NJS_VMCODE_INITIALIZATION_TEST, 0, right); + 
NJS_VMCODE_INITIALIZATION_TEST, right); code->dst = right->index; } @@ -2977,7 +2976,7 @@ njs_generate_stop_statement_end(njs_vm_t njs_vmcode_stop_t *stop; njs_generate_code(generator, njs_vmcode_stop_t, stop, - NJS_VMCODE_STOP, 1, node); + NJS_VMCODE_STOP, node); index = njs_scope_undefined_index(vm, 0); node = node->right; @@ -3041,7 +3040,7 @@ njs_generate_global_property_set(njs_vm_ var = njs_variable_reference(vm, node_dst); if (var == NULL) { njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set, - NJS_VMCODE_PROPERTY_SET, 3, node_src); + NJS_VMCODE_PROPERTY_SET, node_src); prop_set->value = node_dst->index; prop_set->object = njs_scope_global_this_index(); @@ -3097,7 +3096,7 @@ njs_generate_assignment(njs_vm_t *vm, nj if (var != NULL && var->type == NJS_VARIABLE_CONST) { njs_generate_code(generator, njs_vmcode_variable_t, var_code, - NJS_VMCODE_ASSIGNMENT_ERROR, 0, node); + NJS_VMCODE_ASSIGNMENT_ERROR, node); var_code->dst = var->index; return njs_generator_stack_pop(vm, generator, NULL); @@ -3248,7 +3247,7 @@ njs_generate_assignment_end(njs_vm_t *vm } else { njs_generate_code(generator, njs_vmcode_2addr_t, to_prop_key, - NJS_VMCODE_TO_PROPERTY_KEY, 2, property); + NJS_VMCODE_TO_PROPERTY_KEY, property); prop_index = njs_generate_temp_index_get(vm, generator, property); @@ -3260,7 +3259,7 @@ njs_generate_assignment_end(njs_vm_t *vm to_prop_key->dst = prop_index; njs_generate_code(generator, njs_vmcode_2addr_t, set_function, - NJS_VMCODE_SET_FUNCTION_NAME, 2, expr); + NJS_VMCODE_SET_FUNCTION_NAME, expr); set_function->dst = expr->index; set_function->src = prop_index; @@ -3268,18 +3267,18 @@ njs_generate_assignment_end(njs_vm_t *vm } njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set, - NJS_VMCODE_PROPERTY_INIT, 3, expr); + NJS_VMCODE_PROPERTY_INIT, expr); break; case NJS_TOKEN_PROTO_INIT: njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set, - NJS_VMCODE_PROTO_INIT, 3, expr); + NJS_VMCODE_PROTO_INIT, expr); break; default: /* NJS_VMCODE_PROPERTY_SET */ njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set, - NJS_VMCODE_PROPERTY_SET, 3, expr); + NJS_VMCODE_PROPERTY_SET, expr); } prop_set->value = expr->index; @@ -3328,7 +3327,7 @@ njs_generate_operation_assignment(njs_vm if (var != NULL && var->type == NJS_VARIABLE_CONST) { njs_generate_code(generator, njs_vmcode_variable_t, var_code, - NJS_VMCODE_ASSIGNMENT_ERROR, 0, node); + NJS_VMCODE_ASSIGNMENT_ERROR, node); var_code->dst = var->index; return njs_generator_stack_pop(vm, generator, NULL); @@ -3341,7 +3340,7 @@ njs_generate_operation_assignment(njs_vm /* Preserve variable value if it may be changed by expression. 
      */
     njs_generate_code(generator, njs_vmcode_move_t, move,
-                      NJS_VMCODE_MOVE, 2, expr);
+                      NJS_VMCODE_MOVE, expr);
     move->src = lvalue->index;
 
     index = njs_generate_temp_index_get(vm, generator, expr);
@@ -3396,7 +3395,7 @@ njs_generate_operation_assignment_name(n
     index = *((njs_index_t *) generator->context);
 
     njs_generate_code(generator, njs_vmcode_3addr_t, code,
-                      node->u.operation, 3, expr);
+                      node->u.operation, expr);
     code->dst = lvalue->index;
     code->src1 = index;
     code->src2 = expr->index;
@@ -3472,7 +3471,7 @@ njs_generate_operation_assignment_prop(n
     }
 
     njs_generate_code(generator, njs_vmcode_3addr_t, to_property_key,
-                      NJS_VMCODE_TO_PROPERTY_KEY_CHK, 2, property);
+                      NJS_VMCODE_TO_PROPERTY_KEY_CHK, property);
     to_property_key->src2 = object->index;
     to_property_key->src1 = property->index;
 
@@ -3485,7 +3484,7 @@ njs_generate_operation_assignment_prop(n
     }
 
     njs_generate_code(generator, njs_vmcode_prop_get_t, prop_get,
-                      NJS_VMCODE_PROPERTY_GET, 3, property);
+                      NJS_VMCODE_PROPERTY_GET, property);
     prop_get->value = index;
     prop_get->object = object->index;
     prop_get->property = prop_index;
@@ -3515,13 +3514,13 @@ njs_generate_operation_assignment_end(nj
     prop_index = *((njs_index_t *) generator->context);
 
     njs_generate_code(generator, njs_vmcode_3addr_t, code,
-                      node->u.operation, 3, expr);
+                      node->u.operation, expr);
     code->dst = node->index;
     code->src1 = node->index;
     code->src2 = expr->index;
 
     njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set,
-                      NJS_VMCODE_PROPERTY_SET, 3, expr);
+                      NJS_VMCODE_PROPERTY_SET, expr);
     prop_set->value = node->index;
     prop_set->object = lvalue->left->index;
     prop_set->property = prop_index;
@@ -3547,7 +3546,7 @@ njs_generate_object(njs_vm_t *vm, njs_ge
     }
 
     njs_generate_code(generator, njs_vmcode_object_t, object,
-                      NJS_VMCODE_OBJECT, 1, node);
+                      NJS_VMCODE_OBJECT, node);
     object->retval = node->index;
 
     /* Initialize object.
      */
@@ -3599,7 +3598,7 @@ njs_generate_property_accessor_end(njs_v
     function = node->right;
 
     njs_generate_code(generator, njs_vmcode_prop_accessor_t, accessor,
-                      NJS_VMCODE_PROPERTY_ACCESSOR, 3, function);
+                      NJS_VMCODE_PROPERTY_ACCESSOR, function);
     accessor->value = function->index;
     accessor->object = lvalue->left->index;
 
@@ -3623,7 +3622,7 @@ njs_generate_array(njs_vm_t *vm, njs_gen
     }
 
     njs_generate_code(generator, njs_vmcode_array_t, array,
-                      NJS_VMCODE_ARRAY, 1, node);
+                      NJS_VMCODE_ARRAY, node);
     array->ctor = node->ctor;
     array->retval = node->index;
     array->length = node->u.length;
@@ -3684,7 +3683,7 @@ njs_generate_function_expression(njs_vm_
     }
 
     njs_generate_code(generator, njs_vmcode_function_t, function,
-                      NJS_VMCODE_FUNCTION, 1, node);
+                      NJS_VMCODE_FUNCTION, node);
     function->lambda = lambda;
     function->async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION);
 
@@ -3716,7 +3715,7 @@ njs_generate_function(njs_vm_t *vm, njs_
     }
 
     njs_generate_code(generator, njs_vmcode_function_t, function,
-                      NJS_VMCODE_FUNCTION, 1, node);
+                      NJS_VMCODE_FUNCTION, node);
     function->lambda = lambda;
     function->async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION);
 
@@ -3743,7 +3742,7 @@ njs_generate_regexp(njs_vm_t *vm, njs_ge
     }
 
     njs_generate_code(generator, njs_vmcode_regexp_t, regexp,
-                      NJS_VMCODE_REGEXP, 1, node);
+                      NJS_VMCODE_REGEXP, node);
     regexp->retval = node->index;
     regexp->pattern = node->u.value.data.u.data;
 
@@ -3770,7 +3769,7 @@ njs_generate_template_literal_end(njs_vm
     njs_vmcode_template_literal_t  *code;
 
     njs_generate_code(generator, njs_vmcode_template_literal_t, code,
-                      NJS_VMCODE_TEMPLATE_LITERAL, 1, node);
+                      NJS_VMCODE_TEMPLATE_LITERAL, node);
     code->retval = node->left->index;
 
     node->index = node->left->index;
@@ -3800,7 +3799,7 @@ njs_generate_test_jump_expression_after(
     njs_vmcode_test_jump_t  *test_jump;
 
     njs_generate_code(generator, njs_vmcode_test_jump_t, test_jump,
-                      node->u.operation, 2, node);
+                      node->u.operation, node);
     jump_offset = njs_code_offset(generator, test_jump);
     test_jump->value = node->left->index;
 
@@ -3895,7 +3894,7 @@ njs_generate_3addr_operation_name(njs_vm
     if (njs_slow_path(njs_parser_has_side_effect(node->right))) {
         njs_generate_code(generator, njs_vmcode_move_t, move,
-                          NJS_VMCODE_MOVE, 2, node);
+                          NJS_VMCODE_MOVE, node);
         move->src = left->index;
 
         index = njs_generate_node_temp_index_get(vm, generator, left);
@@ -3927,7 +3926,7 @@ njs_generate_3addr_operation_end(njs_vm_
     right = node->right;
 
     njs_generate_code(generator, njs_vmcode_3addr_t, code,
-                      node->u.operation, 3, node);
+                      node->u.operation, node);
 
     swap = *((njs_bool_t *) generator->context);
 
@@ -3976,7 +3975,7 @@ njs_generate_2addr_operation_end(njs_vm_
     njs_vmcode_2addr_t  *code;
 
     njs_generate_code(generator, njs_vmcode_2addr_t, code,
-                      node->u.operation, 2, node);
+                      node->u.operation, node);
     code->src = node->left->index;
 
     node->index = njs_generate_dest_index(vm, generator, node);
@@ -4025,7 +4024,7 @@ njs_generate_typeof_operation_end(njs_vm
     njs_vmcode_2addr_t  *code;
 
     njs_generate_code(generator, njs_vmcode_2addr_t, code,
-                      node->u.operation, 2, node->left);
+                      node->u.operation, node->left);
     code->src = node->left->index;
 
     node->index = njs_generate_dest_index(vm, generator, node);
@@ -4064,7 +4063,7 @@ njs_generate_inc_dec_operation(njs_vm_t 
     if (var != NULL && var->type == NJS_VARIABLE_CONST) {
         njs_generate_code(generator, njs_vmcode_variable_t, var_code,
-                          NJS_VMCODE_ASSIGNMENT_ERROR, 0, node);
+                          NJS_VMCODE_ASSIGNMENT_ERROR, node);
         var_code->dst = var->index;
 
         return njs_generator_stack_pop(vm, generator, NULL);
@@ -4078,7 +4077,7 @@ njs_generate_inc_dec_operation(njs_vm_t 
     node->index = index;
 
     njs_generate_code(generator, njs_vmcode_3addr_t, code,
-                      node->u.operation, 3, node);
+                      node->u.operation, node);
     code->dst = index;
     code->src1 = lvalue->index;
     code->src2 = lvalue->index;
@@ -4152,7 +4151,7 @@ found:
     }
 
     njs_generate_code(generator, njs_vmcode_3addr_t, to_property_key,
-                      NJS_VMCODE_TO_PROPERTY_KEY_CHK, 2, node);
+                      NJS_VMCODE_TO_PROPERTY_KEY_CHK, node);
     to_property_key->src2 = lvalue->left->index;
     to_property_key->src1 = lvalue->right->index;
 
@@ -4169,19 +4168,19 @@ found:
     }
 
     njs_generate_code(generator, njs_vmcode_prop_get_t, prop_get,
-                      NJS_VMCODE_PROPERTY_GET, 3, node);
+                      NJS_VMCODE_PROPERTY_GET, node);
     prop_get->value = index;
     prop_get->object = lvalue->left->index;
     prop_get->property = prop_index;
 
     njs_generate_code(generator, njs_vmcode_3addr_t, code,
-                      node->u.operation, 3, node);
+                      node->u.operation, node);
     code->dst = dest_index;
     code->src1 = index;
     code->src2 = index;
 
     njs_generate_code(generator, njs_vmcode_prop_set_t, prop_set,
-                      NJS_VMCODE_PROPERTY_SET, 3, node);
+                      NJS_VMCODE_PROPERTY_SET, node);
     prop_set->value = index;
     prop_set->object = lvalue->left->index;
     prop_set->property = prop_index;
@@ -4442,7 +4441,7 @@ njs_generate_lambda_variables(njs_vm_t *
         if (var->arguments_object) {
             njs_generate_code(generator, njs_vmcode_arguments_t, arguments,
-                              NJS_VMCODE_ARGUMENTS, 1, NULL);
+                              NJS_VMCODE_ARGUMENTS, NULL);
             arguments->dst = var->index;
         }
 
@@ -4495,7 +4494,7 @@ njs_generate_return_statement_end(njs_vm
     if (njs_fast_path(immediate == NULL)) {
         njs_generate_code(generator, njs_vmcode_return_t, code,
-                          NJS_VMCODE_RETURN, 1, node);
+                          NJS_VMCODE_RETURN, node);
         code->retval = index;
 
         node->index = index;
@@ -4525,7 +4524,7 @@ njs_generate_return_statement_end(njs_vm
     }
 
     njs_generate_code(generator, njs_vmcode_try_return_t, try_return,
-                      NJS_VMCODE_TRY_RETURN, 2, node);
+                      NJS_VMCODE_TRY_RETURN, node);
     try_return->retval = index;
     try_return->save = top->index;
     try_return->offset = offsetof(njs_vmcode_try_return_t, offset);
@@ -4587,7 +4586,7 @@ njs_generate_function_call_arguments(njs
     }
 
     njs_generate_code(generator, njs_vmcode_function_frame_t, func,
-                      NJS_VMCODE_FUNCTION_FRAME, 2, node);
+                      NJS_VMCODE_FUNCTION_FRAME, node);
     func_offset = njs_code_offset(generator, func);
     func->ctor = node->ctor;
     func->name = name->index;
@@ -4669,7 +4668,7 @@ njs_generate_method_call_arguments(njs_v
     prop = node->left;
 
     njs_generate_code(generator, njs_vmcode_method_frame_t, method,
-                      NJS_VMCODE_METHOD_FRAME, 3, prop);
+                      NJS_VMCODE_METHOD_FRAME, prop);
     method_offset = njs_code_offset(generator, method);
     method->ctor = node->ctor;
     method->object = prop->left->index;
@@ -4727,7 +4726,7 @@ njs_generate_call(njs_vm_t *vm, njs_gene
     node->index = retval;
 
     njs_generate_code(generator, njs_vmcode_function_call_t, call,
-                      NJS_VMCODE_FUNCTION_CALL, 1, node);
+                      NJS_VMCODE_FUNCTION_CALL, node);
     call->retval = retval;
 
     return NJS_OK;
@@ -4747,7 +4746,7 @@ njs_generate_move_arguments(njs_vm_t *vm
         }
 
         njs_generate_code(generator, njs_vmcode_1addr_t, put_arg,
-                          NJS_VMCODE_PUT_ARG, 0, node);
+                          NJS_VMCODE_PUT_ARG, node);
         put_arg->index = node->left->index;
 
     func_offset = *((njs_jump_off_t *) generator->context);
@@ -4771,7 +4770,7 @@ njs_generate_move_arguments(njs_vm_t *vm
 #define njs_generate_code_catch(generator, _code, _exception, node)          \
     do {                                                                     \
         njs_generate_code(generator, njs_vmcode_catch_t, _code,              \
-                          NJS_VMCODE_CATCH, 2, node);                        \
+                          NJS_VMCODE_CATCH, node);                           \
         _code->offset = sizeof(njs_vmcode_catch_t);                          \
         _code->exception = _exception;                                       \
     } while (0)
@@ -4780,7 +4779,7 @@ njs_generate_move_arguments(njs_vm_t *vm
 #define njs_generate_code_finally(generator, _code, _retval, _exit, node)    \
     do {                                                                     \
         njs_generate_code(generator, njs_vmcode_finally_t, _code,            \
-                          NJS_VMCODE_FINALLY, 1, node);                      \
+                          NJS_VMCODE_FINALLY, node);                         \
         _code->retval = _retval;                                             \
         _code->exit_value = _exit;                                           \
         _code->continue_offset = offsetof(njs_vmcode_finally_t,              \
@@ -4802,7 +4801,7 @@ njs_generate_try_statement(njs_vm_t *vm,
     njs_memzero(&ctx, sizeof(njs_generator_try_ctx_t));
 
     njs_generate_code(generator, njs_vmcode_try_start_t, try_start,
-                      NJS_VMCODE_TRY_START, 2, node);
+                      NJS_VMCODE_TRY_START, node);
     ctx.try_offset = njs_code_offset(generator, try_start);
 
     exception_index = njs_generate_temp_index_get(vm, generator, node);
@@ -4868,7 +4867,7 @@ njs_generate_try_left(njs_vm_t *vm, njs_
     exit_index = try_block->index;
 
     njs_generate_code(generator, njs_vmcode_try_end_t, try_end,
-                      NJS_VMCODE_TRY_END, 0, NULL);
+                      NJS_VMCODE_TRY_END, NULL);
     try_end_offset = njs_code_offset(generator, try_end);
 
     if (try_block->exit != NULL) {
@@ -4881,7 +4880,7 @@ njs_generate_try_left(njs_vm_t *vm, njs_
                                            NJS_GENERATOR_EXIT);
 
         njs_generate_code(generator, njs_vmcode_try_trampoline_t, try_break,
-                          NJS_VMCODE_TRY_BREAK, 1, NULL);
+                          NJS_VMCODE_TRY_BREAK, NULL);
         try_break->exit_value = exit_index;
         try_break->offset = -sizeof(njs_vmcode_try_end_t);
 
@@ -4897,7 +4896,7 @@ njs_generate_try_left(njs_vm_t *vm, njs_
                                            NJS_GENERATOR_CONTINUATION);
 
         njs_generate_code(generator, njs_vmcode_try_trampoline_t, try_continue,
-                          NJS_VMCODE_TRY_CONTINUE, 1, NULL);
+                          NJS_VMCODE_TRY_CONTINUE, NULL);
         try_continue->exit_value = exit_index;
         try_continue->offset = -sizeof(njs_vmcode_try_end_t);
 
@@ -5097,7 +5096,7 @@ njs_generate_try_finally(njs_vm_t *vm, n
     catch_block = ctx->catch_block;
 
     njs_generate_code(generator, njs_vmcode_try_end_t, catch_end,
-                      NJS_VMCODE_TRY_END, 0, node->left->right);
+                      NJS_VMCODE_TRY_END, node->left->right);
     catch_end_offset = njs_code_offset(generator, catch_end);
 
     if (catch_block->exit != NULL) {
@@ -5107,7 +5106,7 @@ njs_generate_try_finally(njs_vm_t *vm, n
                                            NJS_GENERATOR_EXIT);
 
         njs_generate_code(generator, njs_vmcode_try_trampoline_t,
-                          try_break, NJS_VMCODE_TRY_BREAK, 1, NULL);
+                          try_break, NJS_VMCODE_TRY_BREAK, NULL);
 
         try_break->exit_value = exit_index;
 
@@ -5124,7 +5123,7 @@ njs_generate_try_finally(njs_vm_t *vm, n
                                            NJS_GENERATOR_CONTINUATION);
 
         njs_generate_code(generator, njs_vmcode_try_trampoline_t,
-                          try_continue, NJS_VMCODE_TRY_CONTINUE, 1,
+                          try_continue, NJS_VMCODE_TRY_CONTINUE,
                           NULL);
 
         try_continue->exit_value = exit_index;
 
@@ -5280,7 +5279,7 @@ njs_generate_throw_end(njs_vm_t *vm, njs
     njs_vmcode_throw_t  *throw;
 
     njs_generate_code(generator, njs_vmcode_throw_t, throw,
-                      NJS_VMCODE_THROW, 1, node);
+                      NJS_VMCODE_THROW, node);
 
     node->index = node->right->index;
     throw->retval = node->index;
 
@@ -5305,7 +5304,7 @@ njs_generate_import_statement(njs_vm_t *
     }
 
     njs_generate_code(generator, njs_vmcode_import_t, import,
-                      NJS_VMCODE_IMPORT, 1, node);
+                      NJS_VMCODE_IMPORT, node);
     import->module = node->u.module;
     import->retval = lvalue->index;
 
@@ -5336,7 +5335,7 @@ njs_generate_export_statement_end(njs_vm
     obj = node->right;
 
     njs_generate_code(generator, njs_vmcode_return_t, code,
-                      NJS_VMCODE_RETURN, 1, NULL);
+                      NJS_VMCODE_RETURN, NULL);
     code->retval = obj->index;
 
     node->index = obj->index;
 
@@ -5370,7 +5369,7 @@ njs_generate_await_end(njs_vm_t *vm, njs
     }
 
     njs_generate_code(generator, njs_vmcode_await_t, code,
-                      NJS_VMCODE_AWAIT, 1, node);
+                      NJS_VMCODE_AWAIT, node);
     code->retval = index;
 
     node->index = index;
 
@@ -5428,7 +5427,7 @@ njs_generate_global_reference(njs_vm_t *
     njs_generate_code(generator, njs_vmcode_prop_get_t, prop_get,
                       exception ? NJS_VMCODE_GLOBAL_GET: NJS_VMCODE_PROPERTY_GET,
-                      3, node);
+                      node);
 
     prop_get->value = index;
 
@@ -5483,7 +5482,7 @@ njs_generate_reference_error(njs_vm_t *v
     }
 
     njs_generate_code(generator, njs_vmcode_error_t, ref_err, NJS_VMCODE_ERROR,
-                      0, NULL);
+                      NULL);
     ref_err->type = NJS_OBJ_TYPE_REF_ERROR;
 
     lex_entry = njs_lexer_entry(node->u.reference.unique_id);
 
diff -r 476f7b3e617d -r a5f279148c9f src/njs_parser.c
--- a/src/njs_parser.c	Tue Jan 09 17:56:19 2024 -0800
+++ b/src/njs_parser.c	Wed Jan 10 16:26:35 2024 -0800
@@ -3265,8 +3265,8 @@ njs_parser_left_hand_side_expression_opt
 
 static njs_int_t
 njs_parser_expression_node(njs_parser_t *parser, njs_lexer_token_t *token,
-    njs_queue_link_t *current, njs_token_type_t type,
-    njs_vmcode_operation_t operation, njs_parser_state_func_t after)
+    njs_queue_link_t *current, njs_token_type_t type, njs_vmcode_t operation,
+    njs_parser_state_func_t after)
 {
     njs_parser_node_t  *node;
 
@@ -3303,8 +3303,8 @@ static njs_int_t
 njs_parser_update_expression(njs_parser_t *parser, njs_lexer_token_t *token,
     njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     switch (token->type) {
     case NJS_TOKEN_INCREMENT:
@@ -3342,9 +3342,9 @@ static njs_int_t
 njs_parser_update_expression_post(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_token_type_t        type;
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_token_type_t   type;
+    njs_parser_node_t  *node;
 
     /* [no LineTerminator here] */
 
@@ -3416,9 +3416,9 @@ static njs_int_t
 njs_parser_unary_expression(njs_parser_t *parser, njs_lexer_token_t *token,
     njs_queue_link_t *current)
 {
-    njs_token_type_t        type;
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_token_type_t   type;
+    njs_parser_node_t  *node;
 
     switch (token->type) {
     case NJS_TOKEN_DELETE:
@@ -3691,8 +3691,8 @@ static njs_int_t
 njs_parser_multiplicative_expression_match(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     if (parser->target != NULL) {
         parser->target->right = parser->node;
@@ -3754,8 +3754,8 @@ static njs_int_t
 njs_parser_additive_expression_match(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     if (parser->target != NULL) {
         parser->target->right = parser->node;
@@ -3813,8 +3813,8 @@ static njs_int_t
 njs_parser_shift_expression_match(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     if (parser->target != NULL) {
         parser->target->right = parser->node;
@@ -3876,8 +3876,8 @@ static njs_int_t
 njs_parser_relational_expression_match(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     if (parser->target != NULL) {
         parser->target->right = parser->node;
@@ -3955,8 +3955,8 @@ static njs_int_t
 njs_parser_equality_expression_match(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_parser_node_t  *node;
 
     if (parser->target != NULL) {
         parser->target->right = parser->node;
@@ -4408,9 +4408,9 @@ static njs_int_t
 njs_parser_assignment_operator(njs_parser_t *parser, njs_lexer_token_t *token,
     njs_queue_link_t *current)
 {
-    njs_token_type_t        type;
-    njs_parser_node_t       *node;
-    njs_vmcode_operation_t  operation;
+    njs_vmcode_t       operation;
+    njs_token_type_t   type;
+    njs_parser_node_t  *node;
 
     switch (token->type) {
     case NJS_TOKEN_ASSIGNMENT:
diff -r 476f7b3e617d -r a5f279148c9f src/njs_parser.h
--- a/src/njs_parser.h	Tue Jan 09 17:56:19 2024 -0800
+++ b/src/njs_parser.h	Wed Jan 10 16:26:35 2024 -0800
@@ -41,7 +41,7 @@ struct njs_parser_node_s {
         uint32_t                  length;
         njs_variable_reference_t  reference;
         njs_value_t               value;
-        njs_vmcode_operation_t    operation;
+        njs_vmcode_t              operation;
         njs_parser_node_t         *object;
         njs_mod_t                 *module;
     } u;
diff -r 476f7b3e617d -r a5f279148c9f src/njs_vmcode.c
--- a/src/njs_vmcode.c	Tue Jan 09 17:56:19 2024 -0800
+++ b/src/njs_vmcode.c	Wed Jan 10 16:26:35 2024 -0800
@@ -137,7 +137,7 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c
 #define BREAK       pc += ret; NEXT
 
 #define NEXT        vmcode = (njs_vmcode_generic_t *) pc;                    \
-                    SWITCH (vmcode->code.operation)
+                    SWITCH (vmcode->code)
 
 #define NEXT_LBL
 #define FALLTHROUGH
@@ -237,7 +237,7 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c
 
     NEXT_LBL;
 
-    SWITCH (vmcode->code.operation) {
+    SWITCH (vmcode->code) {
 
     CASE (NJS_VMCODE_MOVE):
         njs_vmcode_debug_opcode();
diff -r 476f7b3e617d -r a5f279148c9f src/njs_vmcode.h
--- a/src/njs_vmcode.h	Tue Jan 09 17:56:19 2024 -0800
+++ b/src/njs_vmcode.h	Wed Jan 10 16:26:35 2024 -0800
@@ -23,11 +23,7 @@
 typedef intptr_t njs_jump_off_t;
 
 
-typedef uint8_t njs_vmcode_operation_t;
-
-
-#define NJS_VMCODE_3OPERANDS   0
-#define NJS_VMCODE_2OPERANDS   1
+typedef uint8_t njs_vmcode_t;
 
 
 enum {
@@ -119,12 +115,6 @@ enum {
 
 
 typedef struct {
-    njs_vmcode_operation_t     operation;
-    uint8_t                    operands;   /* 2 bits */
-} njs_vmcode_t;
-
-
-typedef struct {
     njs_vmcode_t               code;
     njs_index_t                operand1;
    njs_index_t               operand2;
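[Editorial note: the net effect of the patch above is easy to miss in the
noise. The old two-field instruction header is collapsed into a single
opcode byte, since the operand count was redundant: it is implied by the
opcode itself. Schematically, from src/njs_vmcode.h before and after:]

    /* Before: every instruction began with a two-field header. */
    typedef uint8_t njs_vmcode_operation_t;

    typedef struct {
        njs_vmcode_operation_t     operation;
        uint8_t                    operands;   /* redundant */
    } njs_vmcode_t;

    /* After: the opcode byte itself is the header, and the
       interpreter dispatches on it directly: SWITCH (vmcode->code). */
    typedef uint8_t njs_vmcode_t;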
From zaihan at unrealasia.net Thu Jan 11 05:06:49 2024
From: zaihan at unrealasia.net (Muhammad Nuzaihan)
Date: Thu, 11 Jan 2024 13:06:49 +0800
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hi Maxim,

Sorry for asking too many questions.

I did look at the mirror module and I couldn't find the code that
reads the body (from what I understand, I need an ngx_chain_t type of
value to build the request body from the buffer):
https://github.com/nginx/nginx/blob/master/src/http/modules/ngx_http_mirror_module.c

I'm going with point 1. I know it will break proxying, but it's
simpler, and I can think about doing 2 later on.

Thank you,
Zaihan

On Thu, Dec 21, 2023 at 8:59 AM Maxim Dounin wrote:
>
> Hello!
>
> On Tue, Dec 19, 2023 at 10:11:04PM +0800, Muhammad Nuzaihan wrote:
>
> > Thanks Maxim, Vasility,
> >
> > The problem I was going to solve is that I needed to run my specific
> > function that takes the data of the request URL path, headers and request
> > body and determine and validate that all that data is correct before
> > sending upstream, or else I would deny the request with 4xx code
> > errors.
> >
> > Handlers can only handle (from what I know) URL path and headers.
> >
> > Request body requires a request chain (ngx_chain_t) to piece out the
> > request body, and handlers don't seem to have the ngx_chain_t, unlike
> > request body filters.
> >
> > Or maybe I am wrong in this case?
>
> It looks like you are trying to do something which simply cannot
> be done. For example, consider a configuration with
> "proxy_request_buffering off;" - in such a configuration the request
> body is being read _after_ the request is passed to the upstream
> server, and you simply cannot validate the request body before passing
> request headers to the upstream server.
>
> As long as you have to examine both request body and request
> headers, I think there can be two possible solutions:
>
> 1. Install a phase handler, in which you read the request body
> yourself, and check both request headers and request body once
> it's read. See the mirror module as an example of how to read the
> body in a phase handler and properly resume processing after it.
> This will break proxying without request buffering, though it might
> be good enough for your particular task.
>
> 2. Install a phase handler to check request headers, and a request
> body filter to check the request body. Do checking in both
> places, and abort request processing when you see that data aren't
> correct. This will work with proxying without request buffering,
> but will be generally more complex to implement. And, obviously,
> in case of proxying without request buffering this won't let
> you validate the request body before the request headers are sent
> to the upstream server.
>
> Hope this helps.
>
> --
> Maxim Dounin
> http://mdounin.ru/
> _______________________________________________
> nginx-devel mailing list
> nginx-devel at nginx.org
> https://mailman.nginx.org/mailman/listinfo/nginx-devel
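[Editorial note: option 1 above, reduced to its skeleton, looks roughly
like the following. This is a sketch only, modeled on the mirror module;
ngx_http_foo_handler and ngx_http_foo_body_handler are placeholder
names, and error handling and module boilerplate are elided:]

    static ngx_int_t
    ngx_http_foo_handler(ngx_http_request_t *r)
    {
        ngx_int_t  rc;

        /* start reading the request body; the callback runs once
           the body is fully read */
        rc = ngx_http_read_client_request_body(r,
                                               ngx_http_foo_body_handler);

        if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
            return rc;
        }

        /* the body is being read asynchronously: finalize with
           NGX_DONE to keep the request reference counting correct */
        ngx_http_finalize_request(r, NGX_DONE);

        return NGX_DONE;
    }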
From arut at nginx.com Thu Jan 11 12:39:42 2024
From: arut at nginx.com (Roman Arutyunyan)
Date: Thu, 11 Jan 2024 16:39:42 +0400
Subject: [PATCH 5 of 6] Stream: the "accept_filter" parameter of the "listen" directive
In-Reply-To: <9be627b7a3a35c00be13.1702654668@enoparse.local>
References: <9be627b7a3a35c00be13.1702654668@enoparse.local>
Message-ID: <20240111123942.67j6otpjbr5mqirv@N00W24XTQX>

Hi,

On Fri, Dec 15, 2023 at 07:37:48PM +0400, Sergey Kandaurov wrote:
> # HG changeset patch
> # User Sergey Kandaurov
> # Date 1702650593 -14400
> #      Fri Dec 15 18:29:53 2023 +0400
> # Node ID 9be627b7a3a35c00be13332f553e2d3b778877ae
> # Parent cca722e447f8beaaa6b41a620c8b4239a5d1aa7d
> Stream: the "accept_filter" parameter of the "listen" directive.
>
> The FreeBSD accept filters support.
>
> diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c
> --- a/src/stream/ngx_stream.c
> +++ b/src/stream/ngx_stream.c
> @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf,
>      ls->keepcnt = addr->opt.tcp_keepcnt;
>  #endif
>
> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER)
> +    ls->accept_filter = addr->opt.accept_filter;
> +#endif
> +
>  #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT)
>      ls->deferred_accept = addr->opt.deferred_accept;
>  #endif
> diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h
> --- a/src/stream/ngx_stream.h
> +++ b/src/stream/ngx_stream.h
> @@ -70,6 +70,10 @@ typedef struct {
>      int                       tcp_keepintvl;
>      int                       tcp_keepcnt;
>  #endif
> +
> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER)
> +    char                      *accept_filter;
> +#endif
>  } ngx_stream_listen_opt_t;
> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c
> --- a/src/stream/ngx_stream_core_module.c
> +++ b/src/stream/ngx_stream_core_module.c
> @@ -987,6 +987,20 @@ ngx_stream_core_listen(ngx_conf_t *cf, n
>              continue;
>          }
>
> +        if (ngx_strncmp(value[i].data, "accept_filter=", 14) == 0) {
> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER)
> +            lsopt.accept_filter = (char *) &value[i].data[14];
> +            lsopt.set = 1;
> +            lsopt.bind = 1;
> +#else
> +            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
> +                               "accept filters \"%V\" are not supported "
> +                               "on this platform, ignored",
> +                               &value[i]);
> +#endif
> +            continue;
> +        }
> +
>          if (ngx_strcmp(value[i].data, "deferred") == 0) {
>  #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT)
>              lsopt.deferred_accept = 1;

Again, for UDP this needs to be disabled. Also, we need to check for
similar parameters in http/quic.

--
Roman Arutyunyan
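[Editorial note: for readers unfamiliar with the feature, the http-level
"listen" directive already accepts this parameter, and under the patch a
stream configuration would presumably look like the sketch below. The
addresses and the "dataready" filter name (accf_data(9)) are
illustrative assumptions; the filter module must be loaded in the
FreeBSD kernel:]

    stream {
        server {
            # hypothetical: wake the worker only when data has arrived
            listen 127.0.0.1:12345 accept_filter=dataready;
            proxy_pass 127.0.0.1:8080;
        }
    }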
From arut at nginx.com Thu Jan 11 12:45:54 2024
From: arut at nginx.com (Roman Arutyunyan)
Date: Thu, 11 Jan 2024 16:45:54 +0400
Subject: [PATCH 6 of 6] Stream: the "setfib" parameter of the "listen" directive
In-Reply-To: <219662ea1613ab68d4d5.1702654669@enoparse.local>
References: <219662ea1613ab68d4d5.1702654669@enoparse.local>
Message-ID: <20240111124554.tikzfpoycgkiy62b@N00W24XTQX>

Hi,

On Fri, Dec 15, 2023 at 07:37:49PM +0400, Sergey Kandaurov wrote:
> # HG changeset patch
> # User Sergey Kandaurov
> # Date 1702651328 -14400
> #      Fri Dec 15 18:42:08 2023 +0400
> # Node ID 219662ea1613ab68d4d5d4085394bba75993ae42
> # Parent 9be627b7a3a35c00be13332f553e2d3b778877ae
> Stream: the "setfib" parameter of the "listen" directive.
>
> The FreeBSD SO_SETFIB support.
>
> diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c
> --- a/src/stream/ngx_stream.c
> +++ b/src/stream/ngx_stream.c
> @@ -1033,6 +1033,10 @@ ngx_stream_add_listening(ngx_conf_t *cf,
>      ls->ipv6only = addr->opt.ipv6only;
>  #endif
>
> +#if (NGX_HAVE_SETFIB)
> +    ls->setfib = addr->opt.setfib;
> +#endif
> +
>  #if (NGX_HAVE_TCP_FASTOPEN)
>      ls->fastopen = addr->opt.fastopen;
>  #endif
> diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h
> --- a/src/stream/ngx_stream.h
> +++ b/src/stream/ngx_stream.h
> @@ -62,6 +62,9 @@ typedef struct {
>      int                       rcvbuf;
>      int                       sndbuf;
>      int                       type;
> +#if (NGX_HAVE_SETFIB)
> +    int                       setfib;
> +#endif
>  #if (NGX_HAVE_TCP_FASTOPEN)
>      int                       fastopen;
>  #endif
> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c
> --- a/src/stream/ngx_stream_core_module.c
> +++ b/src/stream/ngx_stream_core_module.c
> @@ -892,6 +892,9 @@ ngx_stream_core_listen(ngx_conf_t *cf, n
>      lsopt.type = SOCK_STREAM;
>      lsopt.rcvbuf = -1;
>      lsopt.sndbuf = -1;
> +#if (NGX_HAVE_SETFIB)
> +    lsopt.setfib = -1;
> +#endif
>  #if (NGX_HAVE_TCP_FASTOPEN)
>      lsopt.fastopen = -1;
>  #endif
> @@ -921,6 +924,22 @@ ngx_stream_core_listen(ngx_conf_t *cf, n
>              continue;
>          }
>
> +#if (NGX_HAVE_SETFIB)
> +        if (ngx_strncmp(value[i].data, "setfib=", 7) == 0) {
> +            lsopt.setfib = ngx_atoi(value[i].data + 7, value[i].len - 7);
> +            lsopt.set = 1;
> +            lsopt.bind = 1;
> +
> +            if (lsopt.setfib == NGX_ERROR) {
> +                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
> +                                   "invalid setfib \"%V\"", &value[i]);
> +                return NGX_CONF_ERROR;
> +            }
> +
> +            continue;
> +        }
> +#endif
> +
>  #if (NGX_HAVE_TCP_FASTOPEN)
>          if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) {
>              lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9);

Looks good, except for the possible UDP restriction.

--
Roman Arutyunyan
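[Editorial note: setfib binds the listening socket to an alternate
FreeBSD routing table (FIB) via SO_SETFIB. Under the patch, usage would
presumably look like the sketch below; addresses and the FIB number are
illustrative, and the FIB must exist (see setfib(1)):]

    stream {
        server {
            # hypothetical: route this listener via routing table 1
            listen 127.0.0.1:12345 setfib=1;
            proxy_pass 10.0.0.1:8080;
        }
    }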
From zaihan at unrealasia.net Thu Jan 11 19:26:39 2024
From: zaihan at unrealasia.net (Muhammad Nuzaihan)
Date: Fri, 12 Jan 2024 03:26:39 +0800
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hi Maxim,

After searching the archives, I found the solution, which you had
answered before:
https://www.ruby-forum.com/t/trouble-getting-the-request-body-of-a-http-post/180463/4

The code that reads the body is:

    rc = ngx_http_read_client_request_body(r, ngx_http_foo_body_handler);

    if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
        return rc;
    }

Can I buy you coffee?

Thank you!

On Thu, Jan 11, 2024 at 1:06 PM Muhammad Nuzaihan wrote:
>
> Hi Maxim,
>
> Sorry for asking too many questions.
>
> I did look at the mirror module and I couldn't find the code that
> reads the body (from what I understand, I need an ngx_chain_t type of
> value to build the request body from the buffer):
> https://github.com/nginx/nginx/blob/master/src/http/modules/ngx_http_mirror_module.c
>
> I'm going with point 1. I know it will break proxying, but it's
> simpler, and I can think about doing 2 later on.
>
> Thank you,
> Zaihan
>
> On Thu, Dec 21, 2023 at 8:59 AM Maxim Dounin wrote:
> >
> > Hello!
> >
> > On Tue, Dec 19, 2023 at 10:11:04PM +0800, Muhammad Nuzaihan wrote:
> >
> > > Thanks Maxim, Vasility,
> > >
> > > The problem I was going to solve is that I needed to run my specific
> > > function that takes the data of the request URL path, headers and request
> > > body and determine and validate that all that data is correct before
> > > sending upstream, or else I would deny the request with 4xx code
> > > errors.
> > >
> > > Handlers can only handle (from what I know) URL path and headers.
> > >
> > > Request body requires a request chain (ngx_chain_t) to piece out the
> > > request body, and handlers don't seem to have the ngx_chain_t, unlike
> > > request body filters.
> > >
> > > Or maybe I am wrong in this case?
> >
> > It looks like you are trying to do something which simply cannot
> > be done. For example, consider a configuration with
> > "proxy_request_buffering off;" - in such a configuration the request
> > body is being read _after_ the request is passed to the upstream
> > server, and you simply cannot validate the request body before passing
> > request headers to the upstream server.
> >
> > As long as you have to examine both request body and request
> > headers, I think there can be two possible solutions:
> >
> > 1. Install a phase handler, in which you read the request body
> > yourself, and check both request headers and request body once
> > it's read. See the mirror module as an example of how to read the
> > body in a phase handler and properly resume processing after it.
> > This will break proxying without request buffering, though it might
> > be good enough for your particular task.
> >
> > 2. Install a phase handler to check request headers, and a request
> > body filter to check the request body. Do checking in both
> > places, and abort request processing when you see that data aren't
> > correct. This will work with proxying without request buffering,
> > but will be generally more complex to implement. And, obviously,
> > in case of proxying without request buffering this won't let
> > you validate the request body before the request headers are sent
> > to the upstream server.
> >
> > Hope this helps.
> >
> > --
> > Maxim Dounin
> > http://mdounin.ru/
> > _______________________________________________
> > nginx-devel mailing list
> > nginx-devel at nginx.org
> > https://mailman.nginx.org/mailman/listinfo/nginx-devel

From kaloyan.nikolov at 4dcoders.com Thu Jan 11 20:09:11 2024
From: kaloyan.nikolov at 4dcoders.com (kaloyan.nikolov at 4dcoders.com)
Date: Thu, 11 Jan 2024 20:09:11 +0000
Subject: MIME: mjs file extension
Message-ID: 

Hello everyone!

I am proposing to add the mjs file extension to the mime types, because
the lack of it causes some problems with apps (in this case Nextcloud).

I have reviewed the old discussion about the change from
application/javascript to text/javascript and I understand the position
of the devs, but the lack of mjs really does require workarounds that
sometimes aren't that straightforward for most Docker users.

I tried sending the patch via hg but it seems gone, so I am adding it
here at the bottom. It's my first contribution to nginx, and I tried my
best to follow the guidelines.

# HG changeset patch
# User Kaloyan Nikolov
# Date 1704890349 -7200
#      Wed Jan 10 14:39:09 2024 +0200
# Node ID 1d90e179232a3f2e8c6624a3aa955b9629019aa0
# Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
MIME: Add mjs file extension to mime-types.

Added Node.js mjs file extension inside the JavaScript mime-type section.

diff -r ee40e2b1d083 -r 1d90e179232a conf/mime.types
--- a/conf/mime.types	Mon Dec 25 21:15:48 2023 +0400
+++ b/conf/mime.types	Wed Jan 10 14:39:09 2024 +0200
@@ -5,7 +5,7 @@
     text/xml                                         xml;
     image/gif                                        gif;
     image/jpeg                                       jpeg jpg;
-    application/javascript                           js;
+    application/javascript                           js mjs;
 
     application/atom+xml                             atom;
     application/rss+xml                              rss;
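[Editorial note: until such a change lands, a similar effect is
available without patching mime.types. Since default_type applies to
extensions absent from the types map, a location-scoped override works;
a sketch, assuming mjs is indeed missing from the installed mime.types
(text/javascript may be preferred on builds that ship it as the
default):]

    location ~ \.mjs$ {
        # .mjs is not in mime.types, so default_type takes effect
        default_type application/javascript;
    }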
From mdounin at mdounin.ru Thu Jan 11 21:59:36 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Fri, 12 Jan 2024 00:59:36 +0300
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Fri, Jan 12, 2024 at 03:26:39AM +0800, Muhammad Nuzaihan wrote:

> Hi Maxim,
>
> After searching the archives, I found the solution, which you had
> answered before:
> https://www.ruby-forum.com/t/trouble-getting-the-request-body-of-a-http-post/180463/4
>
> The code that reads the body is:
>
>     rc = ngx_http_read_client_request_body(r, ngx_http_foo_body_handler);
>
>     if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
>         return rc;
>     }
>
> Can I buy you coffee?

You may want to start with basic examples and the dev guide,
specifically:

https://nginx.org/en/docs/dev/development_guide.html#http_request_body

Note though that reading the request body during phase processing,
in contrast to doing so in a content handler, requires special
precautions. In particular, you'll have to call
ngx_http_finalize_request(NGX_DONE) to ensure correct request
reference counting:

https://hg.nginx.org/nginx/file/tip/src/http/modules/ngx_http_mirror_module.c#l120

Further, to resume phase processing after reading the request
body you'll have to restore r->write_event_handler and call
ngx_http_core_run_phases(). See here in the mirror module for an
example:

https://hg.nginx.org/nginx/file/tip/src/http/modules/ngx_http_mirror_module.c#l144

Hope this helps.

--
Maxim Dounin
http://mdounin.ru/
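[Editorial note: condensed, the resume path Maxim points at in the
mirror module looks like the sketch below. ngx_http_foo_body_handler is
a placeholder name, and the module's own checks and error handling are
elided:]

    static void
    ngx_http_foo_body_handler(ngx_http_request_t *r)
    {
        /* r->request_body->bufs is the ngx_chain_t holding the body;
           walk it here and validate, finalizing with an error code
           if the body is not acceptable */

        /* keep the body buffers around for the upstream module */
        r->preserve_body = 1;

        /* resume phase processing, as the mirror module does */
        r->write_event_handler = ngx_http_core_run_phases;
        ngx_http_core_run_phases(r);
    }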
From xeioex at nginx.com Thu Jan 11 23:15:35 2024
From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=)
Date: Thu, 11 Jan 2024 23:15:35 +0000
Subject: [njs] QueryString: avoiding arithmetic ops with NULL in parse().
Message-ID: 

details:   https://hg.nginx.org/njs/rev/2b221f44efa6
branches:  
changeset: 2266:2b221f44efa6
user:      Dmitry Volyntsev
date:      Thu Jan 11 15:13:43 2024 -0800
description:
QueryString: avoiding arithmetic ops with NULL in parse().

Found by UndefinedBehaviorSanitizer.

diffstat:

 external/njs_query_string_module.c |  5 ++---
 1 files changed, 2 insertions(+), 3 deletions(-)

diffs (22 lines):

diff -r a5f279148c9f -r 2b221f44efa6 external/njs_query_string_module.c
--- a/external/njs_query_string_module.c	Wed Jan 10 16:26:35 2024 -0800
+++ b/external/njs_query_string_module.c	Thu Jan 11 15:13:43 2024 -0800
@@ -491,7 +491,7 @@ njs_query_string_parser(njs_vm_t *vm, u_
 
     key = query;
 
-    do {
+    while (key < end) {
         if (count++ == max_keys) {
             break;
         }
@@ -519,8 +519,7 @@ njs_query_string_parser(njs_vm_t *vm, u_
 next:
 
         key = part + sep->length;
-
-    } while (key < end);
+    }
 
     return NJS_OK;
 }

From xeioex at nginx.com Thu Jan 11 23:15:37 2024
From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=)
Date: Thu, 11 Jan 2024 23:15:37 +0000
Subject: [njs] HTTP: avoiding arithmetic ops with NULL pointer in r.args getter.
Message-ID: 

details:   https://hg.nginx.org/njs/rev/4fba78789fe4
branches:  
changeset: 2267:4fba78789fe4
user:      Dmitry Volyntsev
date:      Thu Jan 11 15:13:47 2024 -0800
description:
HTTP: avoiding arithmetic ops with NULL pointer in r.args getter.

Found by UndefinedBehaviorSanitizer.

diffstat:

 nginx/ngx_http_js_module.c |  7 ++++---
 1 files changed, 4 insertions(+), 3 deletions(-)

diffs (24 lines):

diff -r 2b221f44efa6 -r 4fba78789fe4 nginx/ngx_http_js_module.c
--- a/nginx/ngx_http_js_module.c	Thu Jan 11 15:13:43 2024 -0800
+++ b/nginx/ngx_http_js_module.c	Thu Jan 11 15:13:47 2024 -0800
@@ -2615,7 +2615,8 @@ static njs_int_t
 ngx_http_js_ext_get_args(njs_vm_t *vm, njs_object_prop_t *prop,
     njs_value_t *value, njs_value_t *setval, njs_value_t *retval)
 {
-    njs_int_t           ret;
+    u_char              *data;
+    njs_int_t            ret;
     njs_value_t         *args;
     ngx_http_js_ctx_t   *ctx;
     ngx_http_request_t  *r;
@@ -2631,8 +2632,8 @@ ngx_http_js_ext_get_args(njs_vm_t *vm, n
     args = njs_value_arg(&ctx->args);
 
     if (njs_value_is_null(args)) {
-        ret = njs_vm_query_string_parse(vm, r->args.data,
-                                        r->args.data + r->args.len, args);
+        data = (r->args.len != 0) ? r->args.data : (u_char *) "";
+        ret = njs_vm_query_string_parse(vm, data, data + r->args.len, args);
 
         if (ret == NJS_ERROR) {
             return NJS_ERROR;
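[Editorial note: both changesets address the same C subtlety — pointer
arithmetic involving a null pointer is undefined behavior even when the
offset is zero, which is exactly what UBSan reports. A contrived,
standalone illustration (not njs code):]

    #include <stddef.h>

    int
    main(void)
    {
        unsigned char  *data = NULL;
        size_t          len = 0;
        unsigned char  *end;

        /* undefined behavior in C: arithmetic on a null pointer,
           even though len == 0; UBSan flags it, and the fixes above
           avoid forming such a pointer in the first place */
        end = data + len;

        return (end == data) ? 0 : 1;
    }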
From zaihan at unrealasia.net Fri Jan 12 14:17:42 2024
From: zaihan at unrealasia.net (Muhammad Nuzaihan)
Date: Fri, 12 Jan 2024 22:17:42 +0800
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hi Maxim,

Thank you so much for your explanation.

I have another question. If I have an empty string in my payload, it
skips the phase handler completely.

Example: curl -X POST http://localhost/proxy/profile/alice/comment -d
'' -H 'Content-Type: application/json'

the flag being "-d ''".

I am doing it at NGX_HTTP_ACCESS_PHASE in the handler. It seems that
if "content_length = 0", it skips the access phase handler as well?

Does this have to do with what we discussed?

On Fri, Jan 12, 2024 at 5:59 AM Maxim Dounin wrote:
>
> Hello!
>
> On Fri, Jan 12, 2024 at 03:26:39AM +0800, Muhammad Nuzaihan wrote:
>
> > Hi Maxim,
> >
> > After searching the archives, I found the solution, which you had
> > answered before:
> > https://www.ruby-forum.com/t/trouble-getting-the-request-body-of-a-http-post/180463/4
> >
> > The code that reads the body is:
> >
> >     rc = ngx_http_read_client_request_body(r, ngx_http_foo_body_handler);
> >
> >     if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
> >         return rc;
> >     }
> >
> > Can I buy you coffee?
>
> You may want to start with basic examples and the dev guide,
> specifically:
>
> https://nginx.org/en/docs/dev/development_guide.html#http_request_body
>
> Note though that reading the request body during phase processing,
> in contrast to doing so in a content handler, requires special
> precautions. In particular, you'll have to call
> ngx_http_finalize_request(NGX_DONE) to ensure correct request
> reference counting:
>
> https://hg.nginx.org/nginx/file/tip/src/http/modules/ngx_http_mirror_module.c#l120
>
> Further, to resume phase processing after reading the request
> body you'll have to restore r->write_event_handler and call
> ngx_http_core_run_phases(). See here in the mirror module for an
> example:
>
> https://hg.nginx.org/nginx/file/tip/src/http/modules/ngx_http_mirror_module.c#l144
>
> Hope this helps.
>
> --
> Maxim Dounin
> http://mdounin.ru/
> _______________________________________________
> nginx-devel mailing list
> nginx-devel at nginx.org
> https://mailman.nginx.org/mailman/listinfo/nginx-devel

From jakub.sysop at gmail.com Fri Jan 12 17:04:22 2024
From: jakub.sysop at gmail.com (Jakub Zelenka)
Date: Fri, 12 Jan 2024 17:04:22 +0000
Subject: [PATCH] fastcgi_params: added REMOTE_HOST parameter
Message-ID: 

Hi,

I'm a PHP-FPM maintainer, and some FPM users have issues with a missing
host header when using HTTP/3:
https://github.com/php/php-src/issues/13021. This is not an nginx issue,
as correctly noted in https://trac.nginx.org/nginx/ticket/2281, but it
would be nice to have a fastcgi_param set for getting the host in the
default config. I was thinking about how to best expose $host, and
REMOTE_HOST seems logical, so I think it could be a useful addition. I
can update FPM to also set REMOTE_HOST from HTTP_HOST if REMOTE_HOST is
not set, which would make it even more available for HTTP/1.1 and
HTTP/2 users.

Please let me know what you think!

# HG changeset patch
# User Jakub Zelenka
# Date 1705078404 0
#      Fri Jan 12 16:53:24 2024 +0000
# Node ID 1ff2f737bd318a730d0944a6037c8fd7c7da2656
# Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
Added REMOTE_HOST parameter to fastcgi_params.

When HTTP/3 is used, users will no longer get HTTP_HOST, as the host
header is no longer set by most clients. It is useful / necessary for
many setups to have such information, and REMOTE_HOST is defined in
CGI/1.1 for such a purpose.

diff -r ee40e2b1d083 -r 1ff2f737bd31 conf/fastcgi_params
--- a/conf/fastcgi_params	Mon Dec 25 21:15:48 2023 +0400
+++ b/conf/fastcgi_params	Fri Jan 12 16:53:24 2024 +0000
@@ -17,6 +17,7 @@
 
 fastcgi_param  REMOTE_ADDR        $remote_addr;
 fastcgi_param  REMOTE_PORT        $remote_port;
+fastcgi_param  REMOTE_HOST        $host;
 fastcgi_param  SERVER_ADDR        $server_addr;
 fastcgi_param  SERVER_PORT        $server_port;
 fastcgi_param  SERVER_NAME        $server_name;
From mdounin at mdounin.ru Fri Jan 12 22:19:59 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Sat, 13 Jan 2024 01:19:59 +0300
Subject: [PATCH] fastcgi_params: added REMOTE_HOST parameter
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Fri, Jan 12, 2024 at 05:04:22PM +0000, Jakub Zelenka wrote:

> Hi,
>
> I'm a PHP-FPM maintainer, and some FPM users have issues with a missing
> host header when using HTTP/3:
> https://github.com/php/php-src/issues/13021. This is not an nginx issue,
> as correctly noted in https://trac.nginx.org/nginx/ticket/2281, but it
> would be nice to have a fastcgi_param set for getting the host in the
> default config. I was thinking about how to best expose $host, and
> REMOTE_HOST seems logical, so I think it could be a useful addition. I
> can update FPM to also set REMOTE_HOST from HTTP_HOST if REMOTE_HOST is
> not set, which would make it even more available for HTTP/1.1 and
> HTTP/2 users.
>
> Please let me know what you think!
>
> # HG changeset patch
> # User Jakub Zelenka
> # Date 1705078404 0
> #      Fri Jan 12 16:53:24 2024 +0000
> # Node ID 1ff2f737bd318a730d0944a6037c8fd7c7da2656
> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
> Added REMOTE_HOST parameter to fastcgi_params.
>
> When HTTP/3 is used, users will no longer get HTTP_HOST, as the host
> header is no longer set by most clients. It is useful / necessary for
> many setups to have such information, and REMOTE_HOST is defined in
> CGI/1.1 for such a purpose.

https://datatracker.ietf.org/doc/html/rfc3875#section-4.1.9

   The REMOTE_HOST variable contains the fully qualified domain name of
   the client sending the request to the server, if available, otherwise
   NULL.

That is, REMOTE_HOST is completely unrelated. It is not the
hostname of the requested server, but the hostname of the client -
the result of a reverse DNS lookup of a client's IP address, something
that used to be provided by some servers when the Internet was small
(e.g., HostnameLookups in Apache). It is certainly not the right param
to use for $host.

IMO, the proper param to use would be SERVER_NAME. It is set to
$server_name by default, though it can be modified locally to provide
$host if needed in the particular configuration.

>
> diff -r ee40e2b1d083 -r 1ff2f737bd31 conf/fastcgi_params
> --- a/conf/fastcgi_params	Mon Dec 25 21:15:48 2023 +0400
> +++ b/conf/fastcgi_params	Fri Jan 12 16:53:24 2024 +0000
> @@ -17,6 +17,7 @@
> 
>  fastcgi_param  REMOTE_ADDR        $remote_addr;
>  fastcgi_param  REMOTE_PORT        $remote_port;
> +fastcgi_param  REMOTE_HOST        $host;
>  fastcgi_param  SERVER_ADDR        $server_addr;
>  fastcgi_param  SERVER_PORT        $server_port;
>  fastcgi_param  SERVER_NAME        $server_name;

--
Maxim Dounin
http://mdounin.ru/
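[Editorial note: in configuration terms, the local override suggested
above is a one-line change; a sketch. Note that fastcgi_param does not
deduplicate: if both a $server_name line and a $host line are present in
the same context, nginx sends both, so the default line should be
replaced rather than supplemented:]

    # in fastcgi_params, replacing the default SERVER_NAME line:
    fastcgi_param  SERVER_NAME  $host;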
From mdounin at mdounin.ru Fri Jan 12 22:23:04 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Sat, 13 Jan 2024 01:23:04 +0300
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Fri, Jan 12, 2024 at 10:17:42PM +0800, Muhammad Nuzaihan wrote:

> Hi Maxim,
>
> Thank you so much for your explanation.
>
> I have another question. If I have an empty string in my payload, it
> skips the phase handler completely.
>
> Example: curl -X POST http://localhost/proxy/profile/alice/comment -d
> '' -H 'Content-Type: application/json'
>
> the flag being "-d ''".
>
> I am doing it at NGX_HTTP_ACCESS_PHASE in the handler. It seems that
> if "content_length = 0", it skips the access phase handler as well?

Access phase handlers are called for all requests (unless these
are rejected at earlier stages). If in doubt, consider configuring
debug logging and adding appropriate debug logging to your module;
it should make things obvious enough.

--
Maxim Dounin
http://mdounin.ru/

From jakub.sysop at gmail.com Fri Jan 12 23:03:45 2024
From: jakub.sysop at gmail.com (Jakub Zelenka)
Date: Fri, 12 Jan 2024 23:03:45 +0000
Subject: [PATCH] fastcgi_params: added REMOTE_HOST parameter
In-Reply-To: 
References: 
Message-ID: 

Hi,

On Fri, Jan 12, 2024 at 10:20 PM Maxim Dounin wrote:

> > # HG changeset patch
> > # User Jakub Zelenka
> > # Date 1705078404 0
> > #      Fri Jan 12 16:53:24 2024 +0000
> > # Node ID 1ff2f737bd318a730d0944a6037c8fd7c7da2656
> > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
> > Added REMOTE_HOST parameter to fastcgi_params.
> >
> > When HTTP/3 is used, users will no longer get HTTP_HOST, as the host
> > header is no longer set by most clients. It is useful / necessary for
> > many setups to have such information, and REMOTE_HOST is defined in
> > CGI/1.1 for such a purpose.
>
> https://datatracker.ietf.org/doc/html/rfc3875#section-4.1.9
>
>    The REMOTE_HOST variable contains the fully qualified domain name of
>    the client sending the request to the server, if available, otherwise
>    NULL.
>
> That is, REMOTE_HOST is completely unrelated. It is not the
> hostname of the requested server, but the hostname of the client -
> the result of a reverse DNS lookup of a client's IP address, something
> that used to be provided by some servers when the Internet was small
> (e.g., HostnameLookups in Apache). It is certainly not the right param
> to use for $host.

I think you are right. I somehow thought about nginx as a client for some
reason (technically it's a client from the FPM point of view), but I agree
that the meaning is different here.

> IMO, the proper param to use would be SERVER_NAME. It is set to
> $server_name by default, though it can be modified locally to provide
> $host if needed in the particular configuration.

I think it's probably the best option, although the current default value
is a bit unfortunate, as it's just the server_name specified by the user,
so most of the time it is meaningless. I'm not sure it can really comply
with the linked RFC either, as it doesn't have to be a hostname. On the
other side, it's been the default for ages, so I guess it won't be
changed, right?

It's all just a bit unfortunate, because with HTTP/3 there is no longer
any server host info in the default configuration that would be passed
to FPM.

Cheers

Jakub
From mdounin at mdounin.ru Sat Jan 13 03:14:32 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Sat, 13 Jan 2024 06:14:32 +0300
Subject: [PATCH] fastcgi_params: added REMOTE_HOST parameter
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Fri, Jan 12, 2024 at 11:03:45PM +0000, Jakub Zelenka wrote:

> On Fri, Jan 12, 2024 at 10:20 PM Maxim Dounin wrote:
>
> > > # HG changeset patch
> > > # User Jakub Zelenka
> > > # Date 1705078404 0
> > > #      Fri Jan 12 16:53:24 2024 +0000
> > > # Node ID 1ff2f737bd318a730d0944a6037c8fd7c7da2656
> > > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
> > > Added REMOTE_HOST parameter to fastcgi_params.
> > >
> > > When HTTP/3 is used, users will no longer get HTTP_HOST, as the host
> > > header is no longer set by most clients. It is useful / necessary for
> > > many setups to have such information, and REMOTE_HOST is defined in
> > > CGI/1.1 for such a purpose.
> >
> > https://datatracker.ietf.org/doc/html/rfc3875#section-4.1.9
> >
> >    The REMOTE_HOST variable contains the fully qualified domain name of
> >    the client sending the request to the server, if available, otherwise
> >    NULL.
> >
> > That is, REMOTE_HOST is completely unrelated. It is not the
> > hostname of the requested server, but the hostname of the client -
> > the result of a reverse DNS lookup of a client's IP address, something
> > that used to be provided by some servers when the Internet was small
> > (e.g., HostnameLookups in Apache). It is certainly not the right param
> > to use for $host.
>
> I think you are right. I somehow thought about nginx as a client for some
> reason (technically it's a client from the FPM point of view), but I agree
> that the meaning is different here.
>
> > IMO, the proper param to use would be SERVER_NAME. It is set to
> > $server_name by default, though it can be modified locally to provide
> > $host if needed in the particular configuration.
>
> I think it's probably the best option, although the current default value
> is a bit unfortunate, as it's just the server_name specified by the user,
> so most of the time it is meaningless. I'm not sure it can really comply
> with the linked RFC either, as it doesn't have to be a hostname. On the
> other side, it's been the default for ages, so I guess it won't be
> changed, right?

Well, $server_name is actually expected to be a proper/canonical
name of the server. In particular, it is used by nginx itself when
returning redirects with "server_name_in_redirect on;" (which used to
be the default, but not anymore, since the default server_name was
changed to "" in nginx 0.8.48).

I believe it is up to the particular server configuration whether
SERVER_NAME should be set to $server_name, that is, the canonical
name of the server, or should use $host, as might be preferred in
more dynamic configurations where multiple names are handled in a
single server{} block or when server_name is not set at all.

Changing the default as provided in the various fastcgi_param files as
shipped with nginx might indeed be troublesome though. For example,
if a configuration expects SERVER_NAME to be a canonical name,
changing SERVER_NAME to the client-controlled $host might result in
security issues.

> It's all just a bit unfortunate, because with HTTP/3 there is no longer
> any server host info in the default configuration that would be passed
> to FPM.

The "Host" header is not required to be present in HTTP/1.x either,
and even in HTTP/1.1, where it is required to be present, it might
not match the authority actually requested via the request line.
That is, technically, using the "Host" header for anything, except
perhaps links sent to the client itself, is almost always wrong.

OTOH, for HTTP/2 nginx emulates the "Host" header based on the
":authority" pseudo-header. Given the amount of pain the approach
taken for HTTP/3 causes, we might consider doing the same for HTTP/3
as well.

--
Maxim Dounin
http://mdounin.ru/
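[Editorial note: pending any such change on the nginx side, a
configuration-level stopgap some deployments use is to synthesize the
missing parameter from $host, which should still be populated from the
HTTP/3 ":authority" pseudo-header. This is an assumption of this note,
not advice from the thread, and HTTP_HOST then becomes entirely
client-controlled:]

    # hypothetical stopgap alongside the usual fastcgi_params include
    fastcgi_param  HTTP_HOST  $host;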
a:1, c:2 2024/01/13 15:01:19 [debug] 2452969#0: *11 http request count:2 blk:0 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 7 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 8 2024/01/13 15:01:19 [debug] 2452969#0: *11 post access phase: 9 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 10 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 11 2024/01/13 15:01:19 [debug] 2452969#0: *11 http init upstream, client timer: 1 2024/01/13 15:01:19 [debug] 2452969#0: *11 event timer del: 3: 183385517 2024/01/13 15:01:19 [debug] 2452969#0: *11 epoll add event: fd:3 op:3 ev:80002005 Here is the expected debug logs when i curl with a json payload: `curl -X POST http://localhost/proxy/profile/alice/comment -d '{"comment":"test"}' -H 'Content-Type: application/json'` 2024/01/13 15:05:59 [debug] 2452969#0: *17 rewrite phase: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "/" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/health" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/unhealthy" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/profile/alice/comment" 2024/01/13 15:05:59 [debug] 2452969#0: *17 using configuration "/proxy/profile/alice/comment" 2024/01/13 15:05:59 [debug] 2452969#0: *17 http cl:18 max:1048576 2024/01/13 15:05:59 [debug] 2452969#0: *17 rewrite phase: 2 2024/01/13 15:05:59 [debug] 2452969#0: *17 post rewrite phase: 3 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 4 2024/01/13 15:05:59 [debug] 2452969#0: *17 http client request body preread 18 2024/01/13 15:05:59 [debug] 2452969#0: *17 http request body content length filter 2024/01/13 15:05:59 [debug] 2452969#0: *17 malloc: 000055ADC47961F0:16634 2024/01/13 15:05:59 Hello from Go's ValidateRequestBody! 2024/01/13 15:05:59 RequestValidation is running... 2024/01/13 15:05:59 Request body length: 18 2024/01/13 15:05:59 Request body in Go: {"comment":"test"} 2024/01/13 15:05:59 [debug] 2452969#0: *17 http body new buf t:0 f:0 0000000000000000, pos 000055ADC478F300, size: 0 file: 0, size: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 malloc: 000055ADC479A300:16634 2024/01/13 15:05:59 Hello from Go's ValidateRequestBody! 2024/01/13 15:05:59 RequestValidation is running... 2024/01/13 15:05:59 Request body length: 18 2024/01/13 15:05:59 Request body in Go: {"comment":"test"} 2024/01/13 15:05:59 [debug] 2452969#0: *17 http body new buf t:0 f:0 0000000000000000, pos 000055ADC478F780, size: 0 file: 0, size: 0 2024/01/13 15:05:59 [alert] 2452969#0: *17 duplicate last buf in save filter, client: 127.0.0.1, server: localhost, request: "POST /proxy/profile/alice/comment HTTP/1.1", host: "localhost" 2024/01/13 15:05:59 [debug] 2452969#0: *17 free: 000055ADC479A300 2024/01/13 15:05:59 [debug] 2452969#0: *17 http finalize request: -4, "/proxy/profile/alice/comment?" a:1, c:2 2024/01/13 15:05:59 [debug] 2452969#0: *17 http request count:2 blk:0 2024/01/13 15:05:59 [debug] 2452969#0: *17 access phase: 7 2024/01/13 15:05:59 [debug] 2452969#0: *17 access phase: 8 2024/01/13 15:05:59 [debug] 2452969#0: *17 post access phase: 9 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 10 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 11 2024/01/13 15:05:59 [debug] 2452969#0: *17 http init upstream, client timer: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 epoll add event: fd:3 op:3 ev:80002005 Thank you Maxim for answering my questions! On Sat, Jan 13, 2024 at 6:23 AM Maxim Dounin wrote: > > Hello! 
> > On Fri, Jan 12, 2024 at 10:17:42PM +0800, Muhammad Nuzaihan wrote: > > > Hi Maxim, > > > > Thank you so much for your explaination. > > > > I have another question. If i have an empty string in my payload, it > > skips the phase handler completely. > > > > Example: curl -X POST http://localhost/proxy/profile/alice/comment -d > > '' -H 'Content-Type: application/json' > > > > the flag "-d ''" > > > > I do doing it at NGX_HTTP_ACCESS_PHASE in the handler. It seems that > > if "content_length = 0", it skips the access phase handler as well? > > Access phase handlers are called for all requests (unless these > are rejected at earlier stages). If in doubt, consider configuring > debug logging and add appropriate debug logging to your module, it > should make things obvious enough. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From zaihan at unrealasia.net Sat Jan 13 07:14:43 2024 From: zaihan at unrealasia.net (Muhammad Nuzaihan) Date: Sat, 13 Jan 2024 15:14:43 +0800 Subject: processing a request without body In-Reply-To: References: Message-ID: Hi Maxim, (added some info about URL) I did enable debug logs before i posted the question. With json payload, my code is executed right after doing malloc and "http request body content length filter". Without a json payload, it doesn't execute my request validation.That's why i thought it might be due to content length is 0. Here is the debug log when i curl with an empty payload: curl -X POST http://localhost/proxy/profile/alice/comment -d '' -H 'Content-Type: application/json' 2024/01/13 15:01:19 [debug] 2452969#0: *11 rewrite phase: 0 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "/" 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "proxy/health" 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "proxy/unhealthy" 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "proxy/profile/alice/comment" 2024/01/13 15:01:19 [debug] 2452969#0: *11 using configuration "/proxy/profile/alice/comment" 2024/01/13 15:01:19 [debug] 2452969#0: *11 http cl:0 max:1048576 2024/01/13 15:01:19 [debug] 2452969#0: *11 rewrite phase: 2 2024/01/13 15:01:19 [debug] 2452969#0: *11 post rewrite phase: 3 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 4 2024/01/13 15:01:19 [debug] 2452969#0: *11 http request body content length filter 2024/01/13 15:01:19 [debug] 2452969#0: *11 http read client request body 2024/01/13 15:01:19 [debug] 2452969#0: *11 recv: eof:0, avail:0 2024/01/13 15:01:19 [debug] 2452969#0: *11 http client request body recv -2 2024/01/13 15:01:19 [debug] 2452969#0: *11 http client request body rest 1 2024/01/13 15:01:19 [debug] 2452969#0: *11 event timer add: 3: 60000:183385517 2024/01/13 15:01:19 [debug] 2452969#0: *11 http finalize request: -4, "/proxy/profile/alice/comment?" 
a:1, c:2 2024/01/13 15:01:19 [debug] 2452969#0: *11 http request count:2 blk:0 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 7 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 8 2024/01/13 15:01:19 [debug] 2452969#0: *11 post access phase: 9 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 10 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 11 2024/01/13 15:01:19 [debug] 2452969#0: *11 http init upstream, client timer: 1 2024/01/13 15:01:19 [debug] 2452969#0: *11 event timer del: 3: 183385517 2024/01/13 15:01:19 [debug] 2452969#0: *11 epoll add event: fd:3 op:3 ev:80002005 Here is the expected debug logs when i curl with a json payload: `curl -X POST http://localhost/proxy/profile/alice/comment -d '{"comment":"test"}' -H 'Content-Type: application/json'` 2024/01/13 15:05:59 [debug] 2452969#0: *17 rewrite phase: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "/" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/health" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/unhealthy" 2024/01/13 15:05:59 [debug] 2452969#0: *17 test location: "proxy/profile/alice/comment" 2024/01/13 15:05:59 [debug] 2452969#0: *17 using configuration "/proxy/profile/alice/comment" 2024/01/13 15:05:59 [debug] 2452969#0: *17 http cl:18 max:1048576 2024/01/13 15:05:59 [debug] 2452969#0: *17 rewrite phase: 2 2024/01/13 15:05:59 [debug] 2452969#0: *17 post rewrite phase: 3 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 4 2024/01/13 15:05:59 [debug] 2452969#0: *17 http client request body preread 18 2024/01/13 15:05:59 [debug] 2452969#0: *17 http request body content length filter 2024/01/13 15:05:59 [debug] 2452969#0: *17 malloc: 000055ADC47961F0:16634 2024/01/13 15:05:59 Hello from Go's ValidateRequestBody! 2024/01/13 15:05:59 RequestValidation is running... 2024/01/13 15:05:59 Request body length: 18 2024/01/13 15:05:59 Request body in Go: {"comment":"test"} 2024/01/13 15:05:59 [debug] 2452969#0: *17 http body new buf t:0 f:0 0000000000000000, pos 000055ADC478F300, size: 0 file: 0, size: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 malloc: 000055ADC479A300:16634 2024/01/13 15:05:59 Hello from Go's ValidateRequestBody! 2024/01/13 15:05:59 RequestValidation is running... 2024/01/13 15:05:59 Request body length: 18 2024/01/13 15:05:59 Request body in Go: {"comment":"test"} 2024/01/13 15:05:59 [debug] 2452969#0: *17 http body new buf t:0 f:0 0000000000000000, pos 000055ADC478F780, size: 0 file: 0, size: 0 2024/01/13 15:05:59 [alert] 2452969#0: *17 duplicate last buf in save filter, client: 127.0.0.1, server: localhost, request: "POST /proxy/profile/alice/comment HTTP/1.1", host: "localhost" 2024/01/13 15:05:59 [debug] 2452969#0: *17 free: 000055ADC479A300 2024/01/13 15:05:59 [debug] 2452969#0: *17 http finalize request: -4, "/proxy/profile/alice/comment?" a:1, c:2 2024/01/13 15:05:59 [debug] 2452969#0: *17 http request count:2 blk:0 2024/01/13 15:05:59 [debug] 2452969#0: *17 access phase: 7 2024/01/13 15:05:59 [debug] 2452969#0: *17 access phase: 8 2024/01/13 15:05:59 [debug] 2452969#0: *17 post access phase: 9 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 10 2024/01/13 15:05:59 [debug] 2452969#0: *17 generic phase: 11 2024/01/13 15:05:59 [debug] 2452969#0: *17 http init upstream, client timer: 0 2024/01/13 15:05:59 [debug] 2452969#0: *17 epoll add event: fd:3 op:3 ev:80002005 On Sat, Jan 13, 2024 at 3:11 PM Muhammad Nuzaihan wrote: > > Hi Maxim, > > I did enable debug logs before i posted the question. 
>
> With json payload, my code is executed right after doing malloc and
> "http request body content length filter".
>
> Without a json payload, it doesn't execute my request
> validation. That's why I thought it might be due to the content length
> being 0.
>
> Here is the debug log when I curl with an empty payload:
>
> [...]
>
> Here are the expected debug logs when I curl with a json payload: `curl
> -X POST http://localhost/proxy/profile/alice/comment -d
> '{"comment":"test"}' -H 'Content-Type: application/json'`
>
> [...]
>
> Thank you Maxim for answering my questions!
>
> On Sat, Jan 13, 2024 at 6:23 AM Maxim Dounin wrote:
> >
> > Hello!
> >
> > On Fri, Jan 12, 2024 at 10:17:42PM +0800, Muhammad Nuzaihan wrote:
> >
> > > Hi Maxim,
> > >
> > > Thank you so much for your explanation.
> > >
> > > I have another question. If I have an empty string in my payload, it
> > > skips the phase handler completely.
> > >
> > > Example: curl -X POST http://localhost/proxy/profile/alice/comment -d
> > > '' -H 'Content-Type: application/json'
> > >
> > > the flag "-d ''"
> > >
> > > I am doing it at NGX_HTTP_ACCESS_PHASE in the handler. It seems that
> > > if "content_length = 0", it skips the access phase handler as well?
> >
> > Access phase handlers are called for all requests (unless these
> > are rejected at earlier stages). If in doubt, consider configuring
> > debug logging and add appropriate debug logging to your module, it
> > should make things obvious enough.
> >
> > --
> > Maxim Dounin
> > http://mdounin.ru/
> > _______________________________________________
> > nginx-devel mailing list
> > nginx-devel at nginx.org
> > https://mailman.nginx.org/mailman/listinfo/nginx-devel

From zaihan at unrealasia.net  Sat Jan 13 07:24:07 2024
From: zaihan at unrealasia.net (Muhammad Nuzaihan)
Date: Sat, 13 Jan 2024 15:24:07 +0800
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hi Maxim,

I've verified that even a bare "curl -X POST
http://localhost/proxy/profile/alice/comment" works, as the debug logs
below show.
2024/01/13 15:19:44 [debug] 2452969#0: *20 rewrite phase: 0 2024/01/13 15:19:44 [debug] 2452969#0: *20 test location: "/" 2024/01/13 15:19:44 [debug] 2452969#0: *20 test location: "proxy/health" 2024/01/13 15:19:44 [debug] 2452969#0: *20 test location: "proxy/unhealthy" 2024/01/13 15:19:44 [debug] 2452969#0: *20 test location: "proxy/profile/alice/comment" 2024/01/13 15:19:44 [debug] 2452969#0: *20 using configuration "/proxy/profile/alice/comment" 2024/01/13 15:19:44 [debug] 2452969#0: *20 http cl:-1 max:1048576 2024/01/13 15:19:44 [debug] 2452969#0: *20 rewrite phase: 2 2024/01/13 15:19:44 [debug] 2452969#0: *20 post rewrite phase: 3 2024/01/13 15:19:44 [debug] 2452969#0: *20 generic phase: 4 2024/01/13 15:19:44 [debug] 2452969#0: *20 malloc: 000055ADC47961F0:16634 2024/01/13 15:19:44 Hello from Go's ValidateRequestBody! 2024/01/13 15:19:44 RequestValidation is running... 2024/01/13 15:19:44 Request body length: 0 2024/01/13 15:19:44 Request body in Go: 2024/01/13 15:19:44 Middleware altered response body, original: , new: {"code":400,"title":"something's wrong with your request body","detail":"the request's body did not match your appspec: request body has an error: value is required but missing"} 2024/01/13 15:19:44 [debug] 2452969#0: *20 http output filter "/proxy/profile/alice/comment?" 2024/01/13 15:19:44 [debug] 2452969#0: *20 http copy filter: "/proxy/profile/alice/comment?" 2024/01/13 15:19:44 [debug] 2452969#0: *20 { "version":"1.0.0-alpha", "dateCreated":1705130384398, "executionTime":123456789, "request":{ "ip":"127.0.0.1", "httpProtocol":"HTTP\/1.1", "uri":"http:\/\/localhost\/proxy\/profile\/alice\/comment", "resource":"\/proxy\/profile\/alice\/comment", "method":"POST", "body":"", "headers":{ "Host":[ "localhost" ], "User-Agent":[ "curl\/7.81.0" ], "Accept":[ "*\/*" ] } }, "response":{ "statusCode":400, "body":"{\"code\":400,\"title\":\"something's wrong with your request body\",\"detail\":\"the request's body did not match your appspec: request body has an error: value is required but missing\"}", "headers":{} } } 2024/01/13 15:19:44 [debug] 2452969#0: *20 Buffer is successful 2024/01/13 15:19:44 [debug] 2452969#0: *20 Start of firetail send 2024/01/13 15:19:44 [debug] 2452969#0: *20 Sending next RESPONSE body 2024/01/13 15:19:44 [debug] 2452969#0: *20 Sending next REQUEST body 2024/01/13 15:19:44 [debug] 2452969#0: *20 Buffer is successful 2024/01/13 15:19:44 [debug] 2452969#0: *20 INCOMING Request Body: , json {} 2024/01/13 15:19:44 [debug] 2452969#0: *20 Sending next REQUEST body 2024/01/13 15:19:44 [alert] 2452969#0: *20 duplicate last buf in save filter, client: 127.0.0.1, server: localhost, request: "POST /proxy/profile/alice/comment HTTP/1.1", host: "localhost" 2024/01/13 15:19:44 [notice] 2452668#0: signal 17 (SIGCHLD) received from 2452969 2024/01/13 15:19:44 [alert] 2452668#0: worker process 2452969 exited on signal 11 (core dumped) 2024/01/13 15:19:44 [debug] 2452668#0: shmtx forced unlock 2024/01/13 15:19:44 [debug] 2452668#0: wake up, sigio 0 2024/01/13 15:19:44 [debug] 2452668#0: reap children 2024/01/13 15:19:44 [debug] 2452668#0: child: 0 2452969 e:0 t:1 d:0 r:1 j:0 2024/01/13 15:19:44 [debug] 2452668#0: channel 3:7 2024/01/13 15:19:44 [notice] 2452668#0: start worker process 2516641 2024/01/13 15:19:44 [debug] 2452668#0: sigsuspend 2024/01/13 15:19:44 [debug] 2516641#0: add cleanup: 000055ADC472CEC8 2024/01/13 15:19:44 [debug] 2516641#0: malloc: 000055ADC46D9520:8 2024/01/13 15:19:44 [debug] 2516641#0: notify eventfd: 9 2024/01/13 15:19:44 [debug] 
2516641#0: testing the EPOLLRDHUP flag: success
2024/01/13 15:19:44 [debug] 2516641#0: malloc: 000055ADC47073D0:6144
2024/01/13 15:19:44 [debug] 2516641#0: malloc: 00007F4DF059E010:253952
2024/01/13 15:19:44 [debug] 2516641#0: malloc: 000055ADC4730510:98304
2024/01/13 15:19:44 [debug] 2516641#0: malloc: 000055ADC4748520:98304
2024/01/13 15:19:44 [debug] 2516641#0: epoll add event: fd:6 op:1 ev:00002001

Only with an empty payload like this does it not execute:

curl -X POST http://localhost/proxy/profile/alice/comment -d '' -H
'Content-Type: application/json'

On Sat, Jan 13, 2024 at 3:14 PM Muhammad Nuzaihan wrote:
>
> Hi Maxim, (added some info about URL)
>
> I did enable debug logs before I posted the question.
>
> With json payload, my code is executed right after doing malloc and
> "http request body content length filter".
>
> Without a json payload, it doesn't execute my request
> validation. That's why I thought it might be due to the content length
> being 0.
>
> Here is the debug log when I curl with an empty payload:
>
> curl -X POST http://localhost/proxy/profile/alice/comment -d '' -H
> 'Content-Type: application/json'
>
> [...]
>
> Here are the expected debug logs when I curl with a json payload: `curl
> -X POST http://localhost/proxy/profile/alice/comment -d
> '{"comment":"test"}' -H 'Content-Type: application/json'`
>
> [...]
>
> Thank you Maxim for answering my questions!
>
> On Sat, Jan 13, 2024 at 3:11 PM Muhammad Nuzaihan wrote:
> >
> > Hi Maxim,
> >
> > I did enable debug logs before I posted the question.
> >
> > With json payload, my code is executed right after doing malloc and
> > "http request body content length filter".
> >
> > Without a json payload, it doesn't execute my request
> > validation. That's why I thought it might be due to the content length
> > being 0.
> >
> > Here is the debug log when I curl with an empty payload:
> >
> > [...]
> >
> > Here are the expected debug logs when I curl with a json payload: `curl
> > -X POST http://localhost/proxy/profile/alice/comment -d
> > '{"comment":"test"}' -H 'Content-Type: application/json'`
> >
> > [...]
> >
> > Thank you Maxim for answering my questions!
> >
> > On Sat, Jan 13, 2024 at 6:23 AM Maxim Dounin wrote:
> > >
> > > Hello!
> > >
> > > On Fri, Jan 12, 2024 at 10:17:42PM +0800, Muhammad Nuzaihan wrote:
> > >
> > > > Hi Maxim,
> > > >
> > > > Thank you so much for your explanation.
> > > >
> > > > I have another question. If I have an empty string in my payload, it
> > > > skips the phase handler completely.
> > > >
> > > > Example: curl -X POST http://localhost/proxy/profile/alice/comment -d
> > > > '' -H 'Content-Type: application/json'
> > > >
> > > > the flag "-d ''"
> > > >
> > > > I am doing it at NGX_HTTP_ACCESS_PHASE in the handler. It seems that
> > > > if "content_length = 0", it skips the access phase handler as well?
> > >
> > > Access phase handlers are called for all requests (unless these
> > > are rejected at earlier stages). If in doubt, consider configuring
> > > debug logging and add appropriate debug logging to your module, it
> > > should make things obvious enough.
> > >
> > > --
> > > Maxim Dounin
> > > http://mdounin.ru/
> > > _______________________________________________
> > > nginx-devel mailing list
> > > nginx-devel at nginx.org
> > > https://mailman.nginx.org/mailman/listinfo/nginx-devel

From mdounin at mdounin.ru  Sat Jan 13 13:38:01 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Sat, 13 Jan 2024 16:38:01 +0300
Subject: processing a request without body
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Sat, Jan 13, 2024 at 03:11:11PM +0800, Muhammad Nuzaihan wrote:

> Hi Maxim,
>
> I did enable debug logs before I posted the question.
>
> With json payload, my code is executed right after doing malloc and
> "http request body content length filter".
>
> Without a json payload, it doesn't execute my request
> validation. That's why I thought it might be due to the content length
> being 0.
>
> Here is the debug log when I curl with an empty payload:
>
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 rewrite phase: 0
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "/"
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "proxy/health"
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location: "proxy/unhealthy"
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 test location:
> "proxy/profile/alice/comment"
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 using configuration
> "/proxy/profile/alice/comment"
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http cl:0 max:1048576
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 rewrite phase: 2
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 post rewrite phase: 3
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 generic phase: 4
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http request body content
> length filter
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http read client request body

Clearly request body reading is called here, at the preaccess phase.
This implies that your code is called - nginx itself won't try to
read the request body that early.

Everything else is up to your code.

> 2024/01/13 15:01:19 [debug] 2452969#0: *11 recv: eof:0, avail:0
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http client request body recv -2
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http client request body rest 1
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 event timer add: 3: 60000:183385517

Note that the request body reading code blocks, waiting for more data.

> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http finalize request: -4,
> "/proxy/profile/alice/comment?" a:1, c:2
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 http request count:2 blk:0
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 7
> 2024/01/13 15:01:19 [debug] 2452969#0: *11 access phase: 8

Note that phase handling continues here: it shouldn't, since request
body reading is in progress. This suggests that your code fails to
stop phase handling after calling ngx_http_read_client_request_body():
note you should return NGX_DONE to stop processing unless there is an
immediate error.

[...]

--
Maxim Dounin
http://mdounin.ru/
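To make the pattern concrete, an access phase handler following this
advice would be structured roughly as below. This is a minimal sketch
rather than code from this thread, and the "ngx_http_my_" names are
made up; the shape of it (read the body, return NGX_DONE, resume the
phase engine from the body handler) is the commonly used one:

    static void
    ngx_http_my_body_handler(ngx_http_request_t *r)
    {
        /* the request body (possibly empty) has been read at this
         * point; run the validation here */

        /* drop the reference taken by
         * ngx_http_read_client_request_body() and resume phase
         * processing */
        r->main->count--;
        r->write_event_handler = ngx_http_core_run_phases;
        ngx_http_core_run_phases(r);
    }

    static ngx_int_t
    ngx_http_my_access_handler(ngx_http_request_t *r)
    {
        ngx_int_t  rc;

        rc = ngx_http_read_client_request_body(r,
                                               ngx_http_my_body_handler);

        if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
            return rc;            /* immediate error */
        }

        return NGX_DONE;          /* stop phase handling for now */
    }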
From mdounin at mdounin.ru  Tue Jan 16 22:26:41 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Wed, 17 Jan 2024 01:26:41 +0300
Subject: This is a question about the "$status" log value when "proxy_read_timeout" occurs.
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Tue, Jan 16, 2024 at 01:15:09PM +0900, 박규철 wrote:

> This is a question about the "$status" log value when "proxy_read_timeout"
> occurs.
> Nginx version in use: v1.25.3
>
> Content of 1 Mbyte in size was requested from [Origin Server].
> A response up to approximately 500 Kbytes in size, including the header,
> was received without delay.
> However, after 500 Kbytes, no response was received from Origin for 3
> seconds, and the connection timed out.
> Since the message "upstream timed out...while reading upstream" was logged
> in the error log, I think the connection was closed due to the
> "proxy_read_timeout 3s" setting.
>
> While checking the log, I noticed that the "$status" value in the access
> log was different from what I expected.
> In my opinion, if the connection was terminated by "proxy_read_timeout",
> the "$status" value would be 5xx, but the "$status" value in the saved
> access log was 200.
>
> A normal response was not completed due to "proxy_read_timeout", so I would
> like to know why the "$status" value is stored as 200 instead of 5xx.
> Should I check a variable other than "$status" for responses to abnormal
> timeouts such as "proxy_read_timeout"?

The $status variable shows the status as sent to the client in the
response headers.  When proxy_read_timeout happens, the response
headers are already sent, so $status contains 200 as sent to the
client.

For errors that happen while sending the response body, consider
looking into the error log.  Some generic information about successful
request completion might be found in the $request_completion variable
(http://nginx.org/r/$request_completion).  Note though that it might
not be set for a variety of reasons.

--
Maxim Dounin
http://mdounin.ru/
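As a side note, one way to make such truncated responses visible in the
access log is to log $request_completion next to $status (a minimal
sketch, not a recommendation from this thread; the format name and log
path are made up):

    log_format completion '$remote_addr "$request" status=$status '
                          'completion=$request_completion';
    access_log /var/log/nginx/access.log completion;

$request_completion evaluates to "OK" when a request has completed, and
to an empty string otherwise, so a response cut short by
proxy_read_timeout would be logged with status=200 and an empty
completion= value.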
From jakub.sysop at gmail.com  Wed Jan 17 13:37:43 2024
From: jakub.sysop at gmail.com (Jakub Zelenka)
Date: Wed, 17 Jan 2024 13:37:43 +0000
Subject: [PATCH] fastcgi_params: added REMOTE_HOST parameter
In-Reply-To: 
References: 
Message-ID: 

Hi,

On Sat, Jan 13, 2024 at 3:14 AM Maxim Dounin wrote:

> Hello!
>
> On Fri, Jan 12, 2024 at 11:03:45PM +0000, Jakub Zelenka wrote:
>
> > On Fri, Jan 12, 2024 at 10:20 PM Maxim Dounin wrote:
> >
> > > > # HG changeset patch
> > > > # User Jakub Zelenka
> > > > # Date 1705078404 0
> > > > #      Fri Jan 12 16:53:24 2024 +0000
> > > > # Node ID 1ff2f737bd318a730d0944a6037c8fd7c7da2656
> > > > # Parent  ee40e2b1d0833b46128a357fbc84c6e23be9be07
> > > > Added REMOTE_HOST parameter to fastcgi_params.
> > > >
> > > > When HTTP/3 is used, users will no longer get HTTP_HOST as host header
> > > > is no longer set by most clients. It is useful / necessary for many
> > > > setups to have such information and REMOTE_HOST is defined in CGI/1.1
> > > > for such purpose.
> > >
> > > https://datatracker.ietf.org/doc/html/rfc3875#section-4.1.9
> > >
> > >    The REMOTE_HOST variable contains the fully qualified domain name of
> > >    the client sending the request to the server, if available, otherwise
> > >    NULL.
> > >
> > > That is, REMOTE_HOST is completely unrelated.  It is not the
> > > hostname of the requested server, but the hostname of the client -
> > > the result of a reverse DNS lookup of the client's IP address,
> > > something that used to be provided by some servers when the Internet
> > > was small (e.g., HostnameLookups in Apache).  It is certainly not
> > > the right param to use for $host.
> >
> > I think you are right. I somehow thought about nginx as a client for some
> > reason (technically it's a client from FPM's point of view) but I agree
> > that the meaning is different here.
> >
> > > IMO, the proper param to use would be SERVER_NAME.  It is set to
> > > $server_name by default, though can be modified locally to provide
> > > $host if needed in the particular configuration.
> >
> > I think it's probably the best option. Although the current default value
> > is a bit unfortunate, as it's just the server_name specified by the user,
> > so most of the time it is meaningless. I'm not sure it can really comply
> > with the linked RFC either, as it doesn't have to be a hostname. On the
> > other hand, it's been the default for ages, so I guess it won't be
> > changed, right?
>
> Well, $server_name is actually expected to be a proper/canonical
> name of the server.  In particular, it is used by nginx itself
> when returning redirects with "server_name_in_redirect on;" (used
> to be the default, but not anymore, since the default server_name
> was changed to "" in nginx 0.8.48).

Yeah, that's exactly the thing. Lots of configurations just have an
empty string or "_" there to make it more generic.

> I believe it is up to the particular server configuration if
> SERVER_NAME should be set to $server_name, that is, the canonical
> name of the server, or should use $host, as might be preferred in
> more dynamic configurations where multiple names are handled in a
> single server{} block or when server_name is not set at all.

The problem is that most users will just need to spend time figuring
that out, which makes the switch to HTTP/3 more difficult for them.

> Changing the default as provided in various fastcgi_param files as
> shipped with nginx might be indeed troublesome though.  For
> example, if a configuration expects SERVER_NAME to be a canonical
> name, changing SERVER_NAME to client-controlled $host might result
> in security issues.

Yeah, agreed, that could do more harm than good.

> The "Host" header is not required to be present in HTTP/1.x
> either, and even in HTTP/1.1 where it is required to be present,
> it might not match the authority actually requested via the
> request line.  That is, technically, using the "Host" header for
> anything, except perhaps links sent to the client itself, is
> almost always wrong.

True, but most of the time it's still enough for applications to do the
right thing for normal users - e.g. to apply the right app
configuration.

> OTOH, for HTTP/2 nginx emulates the "Host" header based on the
> ":authority" pseudo-header.  Given the amount of pain the approach
> taken for HTTP/3 causes, we might consider doing the same for
> HTTP/3 as well.

I think this would be great and probably the best option. It would save
time for many users and maintainers, as I'm sure more users will be
asking for it as soon as HTTP/3 sees more use.

Cheers

Jakub
-------------- next part --------------
An HTML attachment was scrubbed...
URL: 
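For configurations where SERVER_NAME should carry the requested host,
the local override discussed above is a one-line change (a sketch only;
whether client-controlled $host is acceptable here depends on the
application, as noted in the thread):

    # instead of the stock "fastcgi_param SERVER_NAME $server_name;"
    # line from the shipped fastcgi_params file:
    fastcgi_param SERVER_NAME $host;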
> > The "Host" header is not required to be present in HTTP/1.x > either, and even in HTTP/1.1 where it is required to be present, > it might not match the authority actually requested via the > request line. That is, technically using the "Host" header for > anything, except might be links sent to the client itself, is > almost always wrong. > > True but it's still most of the time enough for the applications to do the right thing which works for normal users - e.g. apply the right app configuration. > OTOH, for HTTP/2 nginx emulates the "Host" header based on the > ":authority" pseudo-header. Given the amount of pain the approach > taken for HTTP/3 causes, we might consider doing the same for > HTTP/3 as well. > I think this would be great and probably the best option. It would safe time to many users and maintainers as I'm sure there will be more users asking for it as soon as HTTP/3 becomes more in use. Cheers Jakub -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Thu Jan 18 13:43:08 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 18 Jan 2024 17:43:08 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> Message-ID: > On 4 Jan 2024, at 20:03, Roman Arutyunyan wrote: > > Hi, > > On Wed, Dec 27, 2023 at 06:34:58PM +0400, Sergey Kandaurov wrote: >> On Wed, Dec 13, 2023 at 06:06:59PM +0400, Roman Arutyunyan wrote: >> >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1702476295 -14400 >>> # Wed Dec 13 18:04:55 2023 +0400 >>> # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e >>> # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e >>> Stream: socket peek in preread phase. >>> >>> Previously, preread buffer was always read out from socket, which made it >>> impossible to terminate SSL on the connection without introducing additional >>> SSL BIOs. The following patches will rely on this. >>> >>> Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. >>> It's called if SSL is not already terminated and if an egde-triggered event >>> method is used. For epoll, EPOLLRDHUP support is also required. 
>>> >>> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c >>> --- a/src/stream/ngx_stream_core_module.c >>> +++ b/src/stream/ngx_stream_core_module.c >>> @@ -10,6 +10,10 @@ >>> #include >>> >>> >>> +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, >>> + ngx_stream_phase_handler_t *ph); >>> +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, >>> + ngx_stream_phase_handler_t *ph); >>> static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); >>> static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); >>> static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); >>> @@ -203,8 +207,6 @@ ngx_int_t >>> ngx_stream_core_preread_phase(ngx_stream_session_t *s, >>> ngx_stream_phase_handler_t *ph) >>> { >>> - size_t size; >>> - ssize_t n; >>> ngx_int_t rc; >>> ngx_connection_t *c; >>> ngx_stream_core_srv_conf_t *cscf; >>> @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream >>> >>> if (c->read->timedout) { >>> rc = NGX_STREAM_OK; >>> + goto done; >>> + } >>> >>> - } else if (c->read->timer_set) { >>> - rc = NGX_AGAIN; >>> + if (!c->read->timer_set) { >>> + rc = ph->handler(s); >>> >>> - } else { >>> - rc = ph->handler(s); >>> + if (rc != NGX_AGAIN) { >>> + goto done; >>> + } >>> } >>> >>> - while (rc == NGX_AGAIN) { >>> - >>> + if (c->buffer == NULL) { >>> + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); >>> if (c->buffer == NULL) { >>> - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); >>> - if (c->buffer == NULL) { >>> - rc = NGX_ERROR; >>> - break; >>> - } >>> + rc = NGX_ERROR; >>> + goto done; >>> } >>> - >>> - size = c->buffer->end - c->buffer->last; >>> - >>> - if (size == 0) { >>> - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); >>> - rc = NGX_STREAM_BAD_REQUEST; >>> - break; >>> - } >>> + } >>> >>> - if (c->read->eof) { >>> - rc = NGX_STREAM_OK; >>> - break; >>> - } >>> - >>> - if (!c->read->ready) { >>> - break; >>> - } >>> - >>> - n = c->recv(c, c->buffer->last, size); >>> + if (c->ssl == NULL >>> + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) >>> + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 >>> +#if (NGX_HAVE_EPOLLRDHUP) >>> + || ngx_use_epoll_rdhup >>> +#endif >> >> BTW, c->ssl needs to be guarded under an appropriate macro test. >> Probably, it makes sense to rewrite this in a more readable way. >> For example: >> >> : peak = 0; >> : >> : #if (NGX_HAVE_KQUEUE) >> : if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { >> : peak = 1; >> : } >> : #endif >> : >> : #if (NGX_HAVE_EPOLLRDHUP) >> : if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { >> : peak = 1; >> : } >> : #endif >> : >> : #if (NGX_STREAM_SSL) >> : if (c->ssl) { >> : peak = 0; >> : } >> : #endif > > [..] > > I think it's still too complicated. 
I suggest a separate function: > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -10,6 +10,7 @@ > #include > > > +static ngx_int_t ngx_stream_preread_can_peek(ngx_connection_t *c); > static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph); > static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > @@ -238,14 +239,7 @@ ngx_stream_core_preread_phase(ngx_stream > } > } > > - if (c->ssl == NULL > - && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > - && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > -#if (NGX_HAVE_EPOLLRDHUP) > - || ngx_use_epoll_rdhup > -#endif > - )) > - { > + if (ngx_stream_preread_can_peek(c)) { > rc = ngx_stream_preread_peek(s, ph); > > } else { > @@ -298,6 +292,35 @@ done: > > > static ngx_int_t ngx_uint_t may be? > +ngx_stream_preread_can_peek(ngx_connection_t *c) > +{ > +#if (NGX_STREAM_SSL) > + if (c->ssl) { > + return 0; > + } > +#endif > + > + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { > + return 0; > + } BTW, the only purpose of this check seems to allow testing level triggered events with epoll/kqueue using --with-cc-opt="-DNGX_HAVE_CLEAR_EVENT=0". > + > +#if (NGX_HAVE_KQUEUE) > + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > + return 1; > + } > +#endif > + > +#if (NGX_HAVE_EPOLLRDHUP) > + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > + return 1; > + } > +#endif > + > + return 0; > +} > + > + > +static ngx_int_t > ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > { > ssize_t n; > Looks good. -- Sergey Kandaurov From pluknet at nginx.com Thu Jan 18 14:51:32 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 18 Jan 2024 18:51:32 +0400 Subject: [PATCH 4 of 6] Stream: the "deferred" parameter of the "listen" directive In-Reply-To: <20240109153935.syjw55gwb63t6hoa@N00W24XTQX> References: <20240109153935.syjw55gwb63t6hoa@N00W24XTQX> Message-ID: <5126CD6B-770B-4230-B837-350D767B94B9@nginx.com> > On 9 Jan 2024, at 19:39, Roman Arutyunyan wrote: > > Hi, > > On Fri, Dec 15, 2023 at 07:37:47PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702650289 -14400 >> # Fri Dec 15 18:24:49 2023 +0400 >> # Node ID cca722e447f8beaaa6b41a620c8b4239a5d1aa7d >> # Parent 4d90cb223fdb9e3e6c148726e36cec7835b2f0f8 >> Stream: the "deferred" parameter of the "listen" directive. >> >> The Linux TCP_DEFER_ACCEPT support. 
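For context, TCP_DEFER_ACCEPT makes the kernel complete the TCP
handshake but not wake up the accepting process until data arrives on
the connection (or a timeout expires). At the socket level the feature
boils down to a call along these lines (a simplified sketch, not the
patch code; "ls_fd" and the timeout value are made up):

    int timeout = 1;    /* seconds to wait for data */

    if (setsockopt(ls_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                   (const void *) &timeout, sizeof(int)) == -1)
    {
        /* non-fatal: fall back to a regular accept */
    }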
>> >> diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c >> --- a/src/stream/ngx_stream.c >> +++ b/src/stream/ngx_stream.c >> @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, >> ls->keepcnt = addr->opt.tcp_keepcnt; >> #endif >> >> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) >> + ls->deferred_accept = addr->opt.deferred_accept; >> +#endif >> + >> #if (NGX_HAVE_INET6) >> ls->ipv6only = addr->opt.ipv6only; >> #endif >> diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h >> --- a/src/stream/ngx_stream.h >> +++ b/src/stream/ngx_stream.h >> @@ -53,6 +53,7 @@ typedef struct { >> #if (NGX_HAVE_INET6) >> unsigned ipv6only:1; >> #endif >> + unsigned deferred_accept:1; >> unsigned reuseport:1; >> unsigned so_keepalive:2; >> unsigned proxy_protocol:1; >> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c >> --- a/src/stream/ngx_stream_core_module.c >> +++ b/src/stream/ngx_stream_core_module.c >> @@ -987,6 +987,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n >> continue; >> } >> >> + if (ngx_strcmp(value[i].data, "deferred") == 0) { >> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) >> + lsopt.deferred_accept = 1; >> + lsopt.set = 1; >> + lsopt.bind = 1; >> +#else >> + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >> + "the deferred accept is not supported " >> + "on this platform, ignored"); >> +#endif >> + continue; >> + } >> + >> if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { >> #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) >> if (ngx_strcmp(&value[i].data[10], "n") == 0) { > > We should trigger an error if this option (TCP_DEFER_ACCEPT) is set for UDP. > We have a block "if (lsopt.type == SOCK_DGRAM) {}" later in this function. > Sure, this and the next change needs appropriate checks. SO_SETFIB used to set the routing table (next hop in ip_output) doesn't impose restriction on the socket type, so it is ok. Note that such checks are also missing for HTTP/3 (see the relevant discussion in nginx-ru@ in December). Below is an updated patch series (reviewed changes skipped for brevity). It now includes an updated patch for HTTP/3 as reported by Izorkin. # HG changeset patch # User Sergey Kandaurov # Date 1705588165 -14400 # Thu Jan 18 18:29:25 2024 +0400 # Node ID df9c52b48971bc0b3d17b27ea261e8df7abb8f00 # Parent dcca80118ff37f4ffc86ccf3693a098fb1fa9ffc HTTP/3: added more compatibility checks for "listen ... quic". Now "fastopen", "backlog", "accept_filter", "deferred", and "so_keepalive" parameters are not allowed with "quic" in the "listen" directive. Reported by Izorkin. 
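To illustrate the effect, a configuration like the following sketch is
now rejected at configuration parsing time instead of being silently
accepted (a made-up example; the port and parameter values are
arbitrary):

    server {
        # fails: "fastopen" parameter is incompatible with "quic"
        listen 443 quic fastopen=256;
    }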
diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -3961,7 +3961,7 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx ngx_str_t *value, size; ngx_url_t u; - ngx_uint_t n, i; + ngx_uint_t n, i, backlog; ngx_http_listen_opt_t lsopt; cscf->listen = 1; @@ -4000,6 +4000,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx lsopt.ipv6only = 1; #endif + backlog = 0; + for (n = 2; n < cf->args->nelts; n++) { if (ngx_strcmp(value[n].data, "default_server") == 0 @@ -4058,6 +4060,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx return NGX_CONF_ERROR; } + backlog = 1; + continue; } @@ -4305,9 +4309,29 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx return NGX_CONF_ERROR; } -#if (NGX_HTTP_V3) - if (lsopt.quic) { +#if (NGX_HAVE_TCP_FASTOPEN) + if (lsopt.fastopen != -1) { + return "\"fastopen\" parameter is incompatible with \"quic\""; + } +#endif + + if (backlog) { + return "\"backlog\" parameter is incompatible with \"quic\""; + } + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + if (lsopt.accept_filter) { + return "\"accept_filter\" parameter is incompatible with \"quic\""; + } +#endif + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + if (lsopt.deferred_accept) { + return "\"deferred\" parameter is incompatible with \"quic\""; + } +#endif + #if (NGX_HTTP_SSL) if (lsopt.ssl) { return "\"ssl\" parameter is incompatible with \"quic\""; @@ -4320,13 +4344,15 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx } #endif + if (lsopt.so_keepalive) { + return "\"so_keepalive\" parameter is incompatible with \"quic\""; + } + if (lsopt.proxy_protocol) { return "\"proxy_protocol\" parameter is incompatible with \"quic\""; } } -#endif - for (n = 0; n < u.naddrs; n++) { for (i = 0; i < n; i++) { # HG changeset patch # User Sergey Kandaurov # Date 1705589071 -14400 # Thu Jan 18 18:44:31 2024 +0400 # Node ID b20e6b93489fda0778700b68cf3f85514c7e2547 # Parent df9c52b48971bc0b3d17b27ea261e8df7abb8f00 Stream: the "deferred" parameter of the "listen" directive. The Linux TCP_DEFER_ACCEPT support. 
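Once applied, the parameter can be used in a stream server the same way
as in http, for example (a usage sketch with a hypothetical port and
upstream name):

    stream {
        server {
            listen 127.0.0.1:12345 deferred;
            proxy_pass backend;
        }
    }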
diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->keepcnt = addr->opt.tcp_keepcnt; #endif +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + ls->deferred_accept = addr->opt.deferred_accept; +#endif + #if (NGX_HAVE_INET6) ls->ipv6only = addr->opt.ipv6only; #endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -53,6 +53,7 @@ typedef struct { #if (NGX_HAVE_INET6) unsigned ipv6only:1; #endif + unsigned deferred_accept:1; unsigned reuseport:1; unsigned so_keepalive:2; unsigned proxy_protocol:1; diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -1015,6 +1015,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; } + if (ngx_strcmp(value[i].data, "deferred") == 0) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + lsopt.deferred_accept = 1; + lsopt.set = 1; + lsopt.bind = 1; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "the deferred accept is not supported " + "on this platform, ignored"); +#endif + continue; + } + if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) if (ngx_strcmp(&value[i].data[10], "n") == 0) { @@ -1173,6 +1186,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return "\"backlog\" parameter is incompatible with \"udp\""; } +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + if (lsopt.deferred_accept) { + return "\"deferred\" parameter is incompatible with \"udp\""; + } +#endif + #if (NGX_STREAM_SSL) if (lsopt.ssl) { return "\"ssl\" parameter is incompatible with \"udp\""; # HG changeset patch # User Sergey Kandaurov # Date 1705589072 -14400 # Thu Jan 18 18:44:32 2024 +0400 # Node ID af5b23845d81168b8839512fd34fa5d39d316af2 # Parent b20e6b93489fda0778700b68cf3f85514c7e2547 Stream: the "accept_filter" parameter of the "listen" directive. The FreeBSD accept filters support. 
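The FreeBSD counterpart takes the filter name as a value, mirroring the
existing http syntax (a usage sketch; "dataready" refers to the
accf_data(9) filter, which must be loaded in the kernel):

    stream {
        server {
            listen 127.0.0.1:12345 accept_filter=dataready;
            proxy_pass backend;
        }
    }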
diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->keepcnt = addr->opt.tcp_keepcnt; #endif +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + ls->accept_filter = addr->opt.accept_filter; +#endif + #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) ls->deferred_accept = addr->opt.deferred_accept; #endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -70,6 +70,10 @@ typedef struct { int tcp_keepintvl; int tcp_keepcnt; #endif + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + char *accept_filter; +#endif } ngx_stream_listen_opt_t; diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -1015,6 +1015,20 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; } + if (ngx_strncmp(value[i].data, "accept_filter=", 14) == 0) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + lsopt.accept_filter = (char *) &value[i].data[14]; + lsopt.set = 1; + lsopt.bind = 1; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "accept filters \"%V\" are not supported " + "on this platform, ignored", + &value[i]); +#endif + continue; + } + if (ngx_strcmp(value[i].data, "deferred") == 0) { #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) lsopt.deferred_accept = 1; @@ -1186,6 +1200,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return "\"backlog\" parameter is incompatible with \"udp\""; } +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + if (lsopt.accept_filter) { + return "\"accept_filter\" parameter is incompatible with \"udp\""; + } +#endif + #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) if (lsopt.deferred_accept) { return "\"deferred\" parameter is incompatible with \"udp\""; -- Sergey Kandaurov From arut at nginx.com Thu Jan 18 15:06:06 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Jan 2024 19:06:06 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> Message-ID: <20240118150606.tm776a4c6zajrieo@N00W24XTQX> Hi, On Thu, Jan 18, 2024 at 05:43:08PM +0400, Sergey Kandaurov wrote: > > > On 4 Jan 2024, at 20:03, Roman Arutyunyan wrote: > > > > Hi, > > > > On Wed, Dec 27, 2023 at 06:34:58PM +0400, Sergey Kandaurov wrote: > >> On Wed, Dec 13, 2023 at 06:06:59PM +0400, Roman Arutyunyan wrote: > >> > >>> # HG changeset patch > >>> # User Roman Arutyunyan > >>> # Date 1702476295 -14400 > >>> # Wed Dec 13 18:04:55 2023 +0400 > >>> # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e > >>> # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e > >>> Stream: socket peek in preread phase. > >>> > >>> Previously, preread buffer was always read out from socket, which made it > >>> impossible to terminate SSL on the connection without introducing additional > >>> SSL BIOs. The following patches will rely on this. > >>> > >>> Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > >>> It's called if SSL is not already terminated and if an egde-triggered event > >>> method is used. For epoll, EPOLLRDHUP support is also required. 
> >>> > >>> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > >>> --- a/src/stream/ngx_stream_core_module.c > >>> +++ b/src/stream/ngx_stream_core_module.c > >>> @@ -10,6 +10,10 @@ > >>> #include > >>> > >>> > >>> +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > >>> + ngx_stream_phase_handler_t *ph); > >>> +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > >>> + ngx_stream_phase_handler_t *ph); > >>> static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > >>> static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > >>> static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > >>> @@ -203,8 +207,6 @@ ngx_int_t > >>> ngx_stream_core_preread_phase(ngx_stream_session_t *s, > >>> ngx_stream_phase_handler_t *ph) > >>> { > >>> - size_t size; > >>> - ssize_t n; > >>> ngx_int_t rc; > >>> ngx_connection_t *c; > >>> ngx_stream_core_srv_conf_t *cscf; > >>> @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > >>> > >>> if (c->read->timedout) { > >>> rc = NGX_STREAM_OK; > >>> + goto done; > >>> + } > >>> > >>> - } else if (c->read->timer_set) { > >>> - rc = NGX_AGAIN; > >>> + if (!c->read->timer_set) { > >>> + rc = ph->handler(s); > >>> > >>> - } else { > >>> - rc = ph->handler(s); > >>> + if (rc != NGX_AGAIN) { > >>> + goto done; > >>> + } > >>> } > >>> > >>> - while (rc == NGX_AGAIN) { > >>> - > >>> + if (c->buffer == NULL) { > >>> + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > >>> if (c->buffer == NULL) { > >>> - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > >>> - if (c->buffer == NULL) { > >>> - rc = NGX_ERROR; > >>> - break; > >>> - } > >>> + rc = NGX_ERROR; > >>> + goto done; > >>> } > >>> - > >>> - size = c->buffer->end - c->buffer->last; > >>> - > >>> - if (size == 0) { > >>> - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > >>> - rc = NGX_STREAM_BAD_REQUEST; > >>> - break; > >>> - } > >>> + } > >>> > >>> - if (c->read->eof) { > >>> - rc = NGX_STREAM_OK; > >>> - break; > >>> - } > >>> - > >>> - if (!c->read->ready) { > >>> - break; > >>> - } > >>> - > >>> - n = c->recv(c, c->buffer->last, size); > >>> + if (c->ssl == NULL > >>> + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > >>> + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > >>> +#if (NGX_HAVE_EPOLLRDHUP) > >>> + || ngx_use_epoll_rdhup > >>> +#endif > >> > >> BTW, c->ssl needs to be guarded under an appropriate macro test. > >> Probably, it makes sense to rewrite this in a more readable way. > >> For example: > >> > >> : peak = 0; > >> : > >> : #if (NGX_HAVE_KQUEUE) > >> : if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > >> : peak = 1; > >> : } > >> : #endif > >> : > >> : #if (NGX_HAVE_EPOLLRDHUP) > >> : if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > >> : peak = 1; > >> : } > >> : #endif > >> : > >> : #if (NGX_STREAM_SSL) > >> : if (c->ssl) { > >> : peak = 0; > >> : } > >> : #endif > > > > [..] > > > > I think it's still too complicated. 
I suggest a separate function: > > > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > > --- a/src/stream/ngx_stream_core_module.c > > +++ b/src/stream/ngx_stream_core_module.c > > @@ -10,6 +10,7 @@ > > #include > > > > > > +static ngx_int_t ngx_stream_preread_can_peek(ngx_connection_t *c); > > static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph); > > static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > > @@ -238,14 +239,7 @@ ngx_stream_core_preread_phase(ngx_stream > > } > > } > > > > - if (c->ssl == NULL > > - && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > > - && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > > -#if (NGX_HAVE_EPOLLRDHUP) > > - || ngx_use_epoll_rdhup > > -#endif > > - )) > > - { > > + if (ngx_stream_preread_can_peek(c)) { > > rc = ngx_stream_preread_peek(s, ph); > > > > } else { > > @@ -298,6 +292,35 @@ done: > > > > > > static ngx_int_t > > ngx_uint_t may be? Yes, indeed. > > +ngx_stream_preread_can_peek(ngx_connection_t *c) > > +{ > > +#if (NGX_STREAM_SSL) > > + if (c->ssl) { > > + return 0; > > + } > > +#endif > > + > > + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { > > + return 0; > > + } > > BTW, the only purpose of this check seems to allow testing level triggered > events with epoll/kqueue using --with-cc-opt="-DNGX_HAVE_CLEAR_EVENT=0". Sure, both of them can work in level-triggered mode. > > + > > +#if (NGX_HAVE_KQUEUE) > > + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > > + return 1; > > + } > > +#endif > > + > > +#if (NGX_HAVE_EPOLLRDHUP) > > + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > > + return 1; > > + } > > +#endif > > + > > + return 0; > > +} > > + > > + > > +static ngx_int_t > > ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > > { > > ssize_t n; > > > > Looks good. > > -- > Sergey Kandaurov > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Final version attached. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1702476295 -14400 # Wed Dec 13 18:04:55 2023 +0400 # Node ID 7324e8e73595c3093fcc2cbd2b5d6b1a947be3b0 # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 Stream: socket peek in preread phase. Previously, preread buffer was always read out from socket, which made it impossible to terminate SSL on the connection without introducing additional SSL BIOs. The following patches will rely on this. Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. It's called if SSL is not already terminated and if an egde-triggered event method is used. For epoll, EPOLLRDHUP support is also required. 
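The crux of the change is that recv() with MSG_PEEK returns buffered
bytes without consuming them, so the same data can still be handed to
an SSL handshake later. Reduced to a single call, the idea is roughly
this (a sketch; "fd" and "buf" are assumed to exist):

    /* copies out the currently buffered bytes but leaves them in the
     * socket receive queue, so a subsequent recv() sees them again */
    n = recv(fd, buf, sizeof(buf), MSG_PEEK);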
diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -10,6 +10,11 @@ #include +static ngx_uint_t ngx_stream_preread_can_peek(ngx_connection_t *c); +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, + ngx_stream_phase_handler_t *ph); +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, + ngx_stream_phase_handler_t *ph); static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); @@ -203,8 +208,6 @@ ngx_int_t ngx_stream_core_preread_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) { - size_t size; - ssize_t n; ngx_int_t rc; ngx_connection_t *c; ngx_stream_core_srv_conf_t *cscf; @@ -217,56 +220,33 @@ ngx_stream_core_preread_phase(ngx_stream if (c->read->timedout) { rc = NGX_STREAM_OK; + goto done; + } - } else if (c->read->timer_set) { - rc = NGX_AGAIN; + if (!c->read->timer_set) { + rc = ph->handler(s); - } else { - rc = ph->handler(s); + if (rc != NGX_AGAIN) { + goto done; + } } - while (rc == NGX_AGAIN) { - + if (c->buffer == NULL) { + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); if (c->buffer == NULL) { - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); - if (c->buffer == NULL) { - rc = NGX_ERROR; - break; - } + rc = NGX_ERROR; + goto done; } - - size = c->buffer->end - c->buffer->last; - - if (size == 0) { - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); - rc = NGX_STREAM_BAD_REQUEST; - break; - } + } - if (c->read->eof) { - rc = NGX_STREAM_OK; - break; - } - - if (!c->read->ready) { - break; - } - - n = c->recv(c, c->buffer->last, size); + if (ngx_stream_preread_can_peek(c)) { + rc = ngx_stream_preread_peek(s, ph); - if (n == NGX_ERROR || n == 0) { - rc = NGX_STREAM_OK; - break; - } + } else { + rc = ngx_stream_preread(s, ph); + } - if (n == NGX_AGAIN) { - break; - } - - c->buffer->last += n; - - rc = ph->handler(s); - } +done: if (rc == NGX_AGAIN) { if (ngx_handle_read_event(c->read, 0) != NGX_OK) { @@ -311,6 +291,129 @@ ngx_stream_core_preread_phase(ngx_stream } +static ngx_uint_t +ngx_stream_preread_can_peek(ngx_connection_t *c) +{ +#if (NGX_STREAM_SSL) + if (c->ssl) { + return 0; + } +#endif + + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { + return 0; + } + +#if (NGX_HAVE_KQUEUE) + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { + return 1; + } +#endif + +#if (NGX_HAVE_EPOLLRDHUP) + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { + return 1; + } +#endif + + return 0; +} + + +static ngx_int_t +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) +{ + ssize_t n; + ngx_int_t rc; + ngx_err_t err; + ngx_connection_t *c; + + c = s->connection; + + n = recv(c->fd, (char *) c->buffer->last, + c->buffer->end - c->buffer->last, MSG_PEEK); + + err = ngx_socket_errno; + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); + + if (n == -1) { + if (err == NGX_EAGAIN) { + c->read->ready = 0; + return NGX_AGAIN; + } + + ngx_connection_error(c, err, "recv() failed"); + return NGX_STREAM_OK; + } + + if (n == 0) { + return NGX_STREAM_OK; + } + + c->buffer->last += n; + + rc = ph->handler(s); + + if (rc != NGX_AGAIN) { + c->buffer->last = c->buffer->pos; + return rc; + } + + if (c->buffer->last == c->buffer->end) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer 
full"); + return NGX_STREAM_BAD_REQUEST; + } + + if (c->read->pending_eof) { + return NGX_STREAM_OK; + } + + c->buffer->last = c->buffer->pos; + + return NGX_AGAIN; +} + + +static ngx_int_t +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) +{ + ssize_t n; + ngx_int_t rc; + ngx_connection_t *c; + + c = s->connection; + + while (c->read->ready) { + + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); + + if (n == NGX_AGAIN) { + return NGX_AGAIN; + } + + if (n == NGX_ERROR || n == 0) { + return NGX_STREAM_OK; + } + + c->buffer->last += n; + + rc = ph->handler(s); + + if (rc != NGX_AGAIN) { + return rc; + } + + if (c->buffer->last == c->buffer->end) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); + return NGX_STREAM_BAD_REQUEST; + } + } + + return NGX_AGAIN; +} + + ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) From arut at nginx.com Thu Jan 18 15:24:21 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Jan 2024 19:24:21 +0400 Subject: [PATCH 4 of 6] Stream: the "deferred" parameter of the "listen" directive In-Reply-To: <5126CD6B-770B-4230-B837-350D767B94B9@nginx.com> References: <20240109153935.syjw55gwb63t6hoa@N00W24XTQX> <5126CD6B-770B-4230-B837-350D767B94B9@nginx.com> Message-ID: <20240118152421.c2n7n7qetvhmo5km@N00W24XTQX> Hi, On Thu, Jan 18, 2024 at 06:51:32PM +0400, Sergey Kandaurov wrote: > > > On 9 Jan 2024, at 19:39, Roman Arutyunyan wrote: > > > > Hi, > > > > On Fri, Dec 15, 2023 at 07:37:47PM +0400, Sergey Kandaurov wrote: > >> # HG changeset patch > >> # User Sergey Kandaurov > >> # Date 1702650289 -14400 > >> # Fri Dec 15 18:24:49 2023 +0400 > >> # Node ID cca722e447f8beaaa6b41a620c8b4239a5d1aa7d > >> # Parent 4d90cb223fdb9e3e6c148726e36cec7835b2f0f8 > >> Stream: the "deferred" parameter of the "listen" directive. > >> > >> The Linux TCP_DEFER_ACCEPT support. 
> >> > >> diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > >> --- a/src/stream/ngx_stream.c > >> +++ b/src/stream/ngx_stream.c > >> @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, > >> ls->keepcnt = addr->opt.tcp_keepcnt; > >> #endif > >> > >> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > >> + ls->deferred_accept = addr->opt.deferred_accept; > >> +#endif > >> + > >> #if (NGX_HAVE_INET6) > >> ls->ipv6only = addr->opt.ipv6only; > >> #endif > >> diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > >> --- a/src/stream/ngx_stream.h > >> +++ b/src/stream/ngx_stream.h > >> @@ -53,6 +53,7 @@ typedef struct { > >> #if (NGX_HAVE_INET6) > >> unsigned ipv6only:1; > >> #endif > >> + unsigned deferred_accept:1; > >> unsigned reuseport:1; > >> unsigned so_keepalive:2; > >> unsigned proxy_protocol:1; > >> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > >> --- a/src/stream/ngx_stream_core_module.c > >> +++ b/src/stream/ngx_stream_core_module.c > >> @@ -987,6 +987,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > >> continue; > >> } > >> > >> + if (ngx_strcmp(value[i].data, "deferred") == 0) { > >> +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > >> + lsopt.deferred_accept = 1; > >> + lsopt.set = 1; > >> + lsopt.bind = 1; > >> +#else > >> + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > >> + "the deferred accept is not supported " > >> + "on this platform, ignored"); > >> +#endif > >> + continue; > >> + } > >> + > >> if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { > >> #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) > >> if (ngx_strcmp(&value[i].data[10], "n") == 0) { > > > > We should trigger an error if this option (TCP_DEFER_ACCEPT) is set for UDP. > > We have a block "if (lsopt.type == SOCK_DGRAM) {}" later in this function. > > > > Sure, this and the next change needs appropriate checks. > SO_SETFIB used to set the routing table (next hop in ip_output) > doesn't impose restriction on the socket type, so it is ok. > > Note that such checks are also missing for HTTP/3 > (see the relevant discussion in nginx-ru@ in December). > > Below is an updated patch series (reviewed changes skipped for brevity). > It now includes an updated patch for HTTP/3 as reported by Izorkin. > > # HG changeset patch > # User Sergey Kandaurov > # Date 1705588165 -14400 > # Thu Jan 18 18:29:25 2024 +0400 > # Node ID df9c52b48971bc0b3d17b27ea261e8df7abb8f00 > # Parent dcca80118ff37f4ffc86ccf3693a098fb1fa9ffc > HTTP/3: added more compatibility checks for "listen ... quic". > > Now "fastopen", "backlog", "accept_filter", "deferred", and "so_keepalive" > parameters are not allowed with "quic" in the "listen" directive. > > Reported by Izorkin. 
> > diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c > --- a/src/http/ngx_http_core_module.c > +++ b/src/http/ngx_http_core_module.c > @@ -3961,7 +3961,7 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx > > ngx_str_t *value, size; > ngx_url_t u; > - ngx_uint_t n, i; > + ngx_uint_t n, i, backlog; > ngx_http_listen_opt_t lsopt; > > cscf->listen = 1; > @@ -4000,6 +4000,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx > lsopt.ipv6only = 1; > #endif > > + backlog = 0; > + > for (n = 2; n < cf->args->nelts; n++) { > > if (ngx_strcmp(value[n].data, "default_server") == 0 > @@ -4058,6 +4060,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx > return NGX_CONF_ERROR; > } > > + backlog = 1; > + > continue; > } > > @@ -4305,9 +4309,29 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx > return NGX_CONF_ERROR; > } > > -#if (NGX_HTTP_V3) > - > if (lsopt.quic) { > +#if (NGX_HAVE_TCP_FASTOPEN) > + if (lsopt.fastopen != -1) { > + return "\"fastopen\" parameter is incompatible with \"quic\""; > + } > +#endif > + > + if (backlog) { > + return "\"backlog\" parameter is incompatible with \"quic\""; > + } > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + if (lsopt.accept_filter) { > + return "\"accept_filter\" parameter is incompatible with \"quic\""; > + } > +#endif > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + if (lsopt.deferred_accept) { > + return "\"deferred\" parameter is incompatible with \"quic\""; > + } > +#endif > + > #if (NGX_HTTP_SSL) > if (lsopt.ssl) { > return "\"ssl\" parameter is incompatible with \"quic\""; > @@ -4320,13 +4344,15 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx > } > #endif > > + if (lsopt.so_keepalive) { > + return "\"so_keepalive\" parameter is incompatible with \"quic\""; > + } > + > if (lsopt.proxy_protocol) { > return "\"proxy_protocol\" parameter is incompatible with \"quic\""; > } > } > > -#endif > - > for (n = 0; n < u.naddrs; n++) { > > for (i = 0; i < n; i++) { > # HG changeset patch > # User Sergey Kandaurov > # Date 1705589071 -14400 > # Thu Jan 18 18:44:31 2024 +0400 > # Node ID b20e6b93489fda0778700b68cf3f85514c7e2547 > # Parent df9c52b48971bc0b3d17b27ea261e8df7abb8f00 > Stream: the "deferred" parameter of the "listen" directive. > > The Linux TCP_DEFER_ACCEPT support. 
> > diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > --- a/src/stream/ngx_stream.c > +++ b/src/stream/ngx_stream.c > @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, > ls->keepcnt = addr->opt.tcp_keepcnt; > #endif > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + ls->deferred_accept = addr->opt.deferred_accept; > +#endif > + > #if (NGX_HAVE_INET6) > ls->ipv6only = addr->opt.ipv6only; > #endif > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -53,6 +53,7 @@ typedef struct { > #if (NGX_HAVE_INET6) > unsigned ipv6only:1; > #endif > + unsigned deferred_accept:1; > unsigned reuseport:1; > unsigned so_keepalive:2; > unsigned proxy_protocol:1; > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -1015,6 +1015,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > continue; > } > > + if (ngx_strcmp(value[i].data, "deferred") == 0) { > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + lsopt.deferred_accept = 1; > + lsopt.set = 1; > + lsopt.bind = 1; > +#else > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "the deferred accept is not supported " > + "on this platform, ignored"); > +#endif > + continue; > + } > + > if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { > #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) > if (ngx_strcmp(&value[i].data[10], "n") == 0) { > @@ -1173,6 +1186,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return "\"backlog\" parameter is incompatible with \"udp\""; > } > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + if (lsopt.deferred_accept) { > + return "\"deferred\" parameter is incompatible with \"udp\""; > + } > +#endif > + > #if (NGX_STREAM_SSL) > if (lsopt.ssl) { > return "\"ssl\" parameter is incompatible with \"udp\""; > # HG changeset patch > # User Sergey Kandaurov > # Date 1705589072 -14400 > # Thu Jan 18 18:44:32 2024 +0400 > # Node ID af5b23845d81168b8839512fd34fa5d39d316af2 > # Parent b20e6b93489fda0778700b68cf3f85514c7e2547 > Stream: the "accept_filter" parameter of the "listen" directive. > > The FreeBSD accept filters support. 
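For background, FreeBSD accept filters are attached to a listening socket by name via SO_ACCEPTFILTER. A rough standalone sketch, illustrative only and not part of the patch:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <string.h>

    /* Attach the "dataready" accept filter (provided by the accf_data
     * kernel module), so that accept() returns only after the client
     * has sent its first bytes. */
    static int
    set_accept_filter(int listen_fd)
    {
        struct accept_filter_arg  af;

        memset(&af, 0, sizeof(struct accept_filter_arg));
        (void) strncpy(af.af_name, "dataready", sizeof(af.af_name) - 1);

        return setsockopt(listen_fd, SOL_SOCKET, SO_ACCEPTFILTER,
                          &af, sizeof(struct accept_filter_arg));
    }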
> > diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > --- a/src/stream/ngx_stream.c > +++ b/src/stream/ngx_stream.c > @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, > ls->keepcnt = addr->opt.tcp_keepcnt; > #endif > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + ls->accept_filter = addr->opt.accept_filter; > +#endif > + > #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > ls->deferred_accept = addr->opt.deferred_accept; > #endif > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -70,6 +70,10 @@ typedef struct { > int tcp_keepintvl; > int tcp_keepcnt; > #endif > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + char *accept_filter; > +#endif > } ngx_stream_listen_opt_t; > > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -1015,6 +1015,20 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > continue; > } > > + if (ngx_strncmp(value[i].data, "accept_filter=", 14) == 0) { > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + lsopt.accept_filter = (char *) &value[i].data[14]; > + lsopt.set = 1; > + lsopt.bind = 1; > +#else > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "accept filters \"%V\" are not supported " > + "on this platform, ignored", > + &value[i]); > +#endif > + continue; > + } > + > if (ngx_strcmp(value[i].data, "deferred") == 0) { > #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > lsopt.deferred_accept = 1; > @@ -1186,6 +1200,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return "\"backlog\" parameter is incompatible with \"udp\""; > } > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + if (lsopt.accept_filter) { > + return "\"accept_filter\" parameter is incompatible with \"udp\""; > + } > +#endif > + > #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > if (lsopt.deferred_accept) { > return "\"deferred\" parameter is incompatible with \"udp\""; > The patches look ok. However for even better visual compatibility between Stream and HTTP I suggest a small patch on top of everything which moves the fastopen check up. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1705590758 -14400 # Thu Jan 18 19:12:38 2024 +0400 # Node ID c8c4fe87c61c39ced688ad66655f40951cde6bcc # Parent 0257dc20b29f2a897f90e78dc356d384c8d7f66d Stream: moved fastopen compatibility check. The move makes the code look similar to the corresponding code in http module. 
diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c
--- a/src/stream/ngx_stream_core_module.c
+++ b/src/stream/ngx_stream_core_module.c
@@ -1215,6 +1215,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n
     }
 
     if (lsopt.type == SOCK_DGRAM) {
+#if (NGX_HAVE_TCP_FASTOPEN)
+        if (lsopt.fastopen != -1) {
+            return "\"fastopen\" parameter is incompatible with \"udp\"";
+        }
+#endif
+
         if (backlog) {
             return "\"backlog\" parameter is incompatible with \"udp\"";
         }
@@ -1244,12 +1250,6 @@ ngx_stream_core_listen(ngx_conf_t *cf, n
         if (lsopt.proxy_protocol) {
             return "\"proxy_protocol\" parameter is incompatible with \"udp\"";
         }
-
-#if (NGX_HAVE_TCP_FASTOPEN)
-        if (lsopt.fastopen != -1) {
-            return "\"fastopen\" parameter is incompatible with \"udp\"";
-        }
-#endif
     }
 
     for (n = 0; n < u.naddrs; n++) {

From pluknet at nginx.com  Thu Jan 18 16:15:33 2024
From: pluknet at nginx.com (Sergey Kandaurov)
Date: Thu, 18 Jan 2024 20:15:33 +0400
Subject: [PATCH 1 of 3] Stream: socket peek in preread phase
In-Reply-To: <20240118150606.tm776a4c6zajrieo@N00W24XTQX>
References: <966331bb4936888ef2f0.1699610839@arut-laptop>
 <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX>
 <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV>
 <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX>
 <20240118150606.tm776a4c6zajrieo@N00W24XTQX>
Message-ID: <20240118161533.dtnqbdhyxiq36g4x@Y9MQ9X2QVV>

On Thu, Jan 18, 2024 at 07:06:06PM +0400, Roman Arutyunyan wrote:
> Hi,
> 
> # HG changeset patch
> # User Roman Arutyunyan
> # Date 1702476295 -14400
> # Wed Dec 13 18:04:55 2023 +0400
> # Node ID 7324e8e73595c3093fcc2cbd2b5d6b1a947be3b0
> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
> Stream: socket peek in preread phase.
> 
> Previously, the preread buffer was always read out from the socket, which
> made it impossible to terminate SSL on the connection without introducing
> additional SSL BIOs. The following patches will rely on this.
> 
> Now, when possible, recv(MSG_PEEK) is used instead, which keeps the data
> in the socket. It's called if SSL is not already terminated and if an
> edge-triggered event method is used. For epoll, EPOLLRDHUP support is
> also required.
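To make the MSG_PEEK behaviour relied upon above concrete: bytes returned by a peeking recv() stay queued in the kernel, so a later read on the same descriptor, including one done internally by an SSL library during the handshake, still sees the stream from its first byte. A minimal standalone sketch, illustrative only:

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Look at data that has already arrived without consuming it;
     * a subsequent recv() without MSG_PEEK returns the same bytes. */
    static ssize_t
    peek_preread(int fd, void *buf, size_t size)
    {
        return recv(fd, buf, size, MSG_PEEK);
    }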
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -10,6 +10,11 @@ > #include > > > +static ngx_uint_t ngx_stream_preread_can_peek(ngx_connection_t *c); > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > @@ -203,8 +208,6 @@ ngx_int_t > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) > { > - size_t size; > - ssize_t n; > ngx_int_t rc; > ngx_connection_t *c; > ngx_stream_core_srv_conf_t *cscf; > @@ -217,56 +220,33 @@ ngx_stream_core_preread_phase(ngx_stream > > if (c->read->timedout) { > rc = NGX_STREAM_OK; > + goto done; > + } > > - } else if (c->read->timer_set) { > - rc = NGX_AGAIN; > + if (!c->read->timer_set) { > + rc = ph->handler(s); > > - } else { > - rc = ph->handler(s); > + if (rc != NGX_AGAIN) { > + goto done; > + } > } > > - while (rc == NGX_AGAIN) { > - > + if (c->buffer == NULL) { > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > if (c->buffer == NULL) { > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > - if (c->buffer == NULL) { > - rc = NGX_ERROR; > - break; > - } > + rc = NGX_ERROR; > + goto done; > } > - > - size = c->buffer->end - c->buffer->last; > - > - if (size == 0) { > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > - rc = NGX_STREAM_BAD_REQUEST; > - break; > - } > + } > > - if (c->read->eof) { > - rc = NGX_STREAM_OK; > - break; > - } > - > - if (!c->read->ready) { > - break; > - } > - > - n = c->recv(c, c->buffer->last, size); > + if (ngx_stream_preread_can_peek(c)) { > + rc = ngx_stream_preread_peek(s, ph); > > - if (n == NGX_ERROR || n == 0) { > - rc = NGX_STREAM_OK; > - break; > - } > + } else { > + rc = ngx_stream_preread(s, ph); > + } > > - if (n == NGX_AGAIN) { > - break; > - } > - > - c->buffer->last += n; > - > - rc = ph->handler(s); > - } > +done: > > if (rc == NGX_AGAIN) { > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > @@ -311,6 +291,129 @@ ngx_stream_core_preread_phase(ngx_stream > } > > > +static ngx_uint_t > +ngx_stream_preread_can_peek(ngx_connection_t *c) > +{ > +#if (NGX_STREAM_SSL) > + if (c->ssl) { > + return 0; > + } > +#endif > + > + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { > + return 0; > + } > + > +#if (NGX_HAVE_KQUEUE) > + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > + return 1; > + } > +#endif > + > +#if (NGX_HAVE_EPOLLRDHUP) > + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > + return 1; > + } > +#endif > + > + return 0; > +} > + > + > +static ngx_int_t > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_err_t err; > + ngx_connection_t *c; > + > + c = s->connection; > + > + n = recv(c->fd, (char *) c->buffer->last, > + c->buffer->end - c->buffer->last, MSG_PEEK); > + > + err = ngx_socket_errno; > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); > + > + if (n == -1) { > + if (err == NGX_EAGAIN) { > + c->read->ready = 0; > + return NGX_AGAIN; > + } > + > + ngx_connection_error(c, err, "recv() 
failed"); > + return NGX_STREAM_OK; > + } > + > + if (n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + c->buffer->last = c->buffer->pos; > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + > + if (c->read->pending_eof) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last = c->buffer->pos; > + > + return NGX_AGAIN; > +} > + > + > +static ngx_int_t > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_connection_t *c; > + > + c = s->connection; > + > + while (c->read->ready) { > + > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > + > + if (n == NGX_AGAIN) { > + return NGX_AGAIN; > + } > + > + if (n == NGX_ERROR || n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + } > + > + return NGX_AGAIN; > +} > + > + > ngx_int_t > ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) Looks good. From pluknet at nginx.com Thu Jan 18 16:20:18 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 18 Jan 2024 20:20:18 +0400 Subject: [PATCH 4 of 6] Stream: the "deferred" parameter of the "listen" directive In-Reply-To: <20240118152421.c2n7n7qetvhmo5km@N00W24XTQX> References: <20240109153935.syjw55gwb63t6hoa@N00W24XTQX> <5126CD6B-770B-4230-B837-350D767B94B9@nginx.com> <20240118152421.c2n7n7qetvhmo5km@N00W24XTQX> Message-ID: <20240118162018.3w26yrmxfs4dsgsy@Y9MQ9X2QVV> On Thu, Jan 18, 2024 at 07:24:21PM +0400, Roman Arutyunyan wrote: > Hi, > > On Thu, Jan 18, 2024 at 06:51:32PM +0400, Sergey Kandaurov wrote: > > > > > On 9 Jan 2024, at 19:39, Roman Arutyunyan wrote: > > > > > > We should trigger an error if this option (TCP_DEFER_ACCEPT) is set for UDP. > > > We have a block "if (lsopt.type == SOCK_DGRAM) {}" later in this function. > > > > > > > Sure, this and the next change needs appropriate checks. > > SO_SETFIB used to set the routing table (next hop in ip_output) > > doesn't impose restriction on the socket type, so it is ok. > > > > Note that such checks are also missing for HTTP/3 > > (see the relevant discussion in nginx-ru@ in December). > > > > Below is an updated patch series (reviewed changes skipped for brevity). > > It now includes an updated patch for HTTP/3 as reported by Izorkin. > > [..] > > The patches look ok. > > However for even better visual compatibility between Stream and HTTP I suggest > a small patch on top of everything which moves the fastopen check up. > > # HG changeset patch > # User Roman Arutyunyan > # Date 1705590758 -14400 > # Thu Jan 18 19:12:38 2024 +0400 > # Node ID c8c4fe87c61c39ced688ad66655f40951cde6bcc > # Parent 0257dc20b29f2a897f90e78dc356d384c8d7f66d > Stream: moved fastopen compatibility check. > > The move makes the code look similar to the corresponding code in http module. 
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -1215,6 +1215,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > } > > if (lsopt.type == SOCK_DGRAM) { > +#if (NGX_HAVE_TCP_FASTOPEN) > + if (lsopt.fastopen != -1) { > + return "\"fastopen\" parameter is incompatible with \"udp\""; > + } > +#endif > + > if (backlog) { > return "\"backlog\" parameter is incompatible with \"udp\""; > } > @@ -1244,12 +1250,6 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (lsopt.proxy_protocol) { > return "\"proxy_protocol\" parameter is incompatible with \"udp\""; > } > - > -#if (NGX_HAVE_TCP_FASTOPEN) > - if (lsopt.fastopen != -1) { > - return "\"fastopen\" parameter is incompatible with \"udp\""; > - } > -#endif > } > > for (n = 0; n < u.naddrs; n++) { Makes sense, agreed. From mdounin at mdounin.ru Thu Jan 18 19:44:50 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 18 Jan 2024 22:44:50 +0300 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: <20240118161533.dtnqbdhyxiq36g4x@Y9MQ9X2QVV> References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> <20240118150606.tm776a4c6zajrieo@N00W24XTQX> <20240118161533.dtnqbdhyxiq36g4x@Y9MQ9X2QVV> Message-ID: Hello! On Thu, Jan 18, 2024 at 08:15:33PM +0400, Sergey Kandaurov wrote: > On Thu, Jan 18, 2024 at 07:06:06PM +0400, Roman Arutyunyan wrote: > > Hi, > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1702476295 -14400 > > # Wed Dec 13 18:04:55 2023 +0400 > > # Node ID 7324e8e73595c3093fcc2cbd2b5d6b1a947be3b0 > > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > > Stream: socket peek in preread phase. > > > > Previously, preread buffer was always read out from socket, which made it > > impossible to terminate SSL on the connection without introducing additional > > SSL BIOs. The following patches will rely on this. > > > > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > > It's called if SSL is not already terminated and if an egde-triggered event > > method is used. For epoll, EPOLLRDHUP support is also required. 
> > > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > > --- a/src/stream/ngx_stream_core_module.c > > +++ b/src/stream/ngx_stream_core_module.c > > @@ -10,6 +10,11 @@ > > #include > > > > > > +static ngx_uint_t ngx_stream_preread_can_peek(ngx_connection_t *c); > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > > @@ -203,8 +208,6 @@ ngx_int_t > > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph) > > { > > - size_t size; > > - ssize_t n; > > ngx_int_t rc; > > ngx_connection_t *c; > > ngx_stream_core_srv_conf_t *cscf; > > @@ -217,56 +220,33 @@ ngx_stream_core_preread_phase(ngx_stream > > > > if (c->read->timedout) { > > rc = NGX_STREAM_OK; > > + goto done; > > + } > > > > - } else if (c->read->timer_set) { > > - rc = NGX_AGAIN; > > + if (!c->read->timer_set) { > > + rc = ph->handler(s); > > > > - } else { > > - rc = ph->handler(s); > > + if (rc != NGX_AGAIN) { > > + goto done; > > + } > > } > > > > - while (rc == NGX_AGAIN) { > > - > > + if (c->buffer == NULL) { > > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > if (c->buffer == NULL) { > > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > - if (c->buffer == NULL) { > > - rc = NGX_ERROR; > > - break; > > - } > > + rc = NGX_ERROR; > > + goto done; > > } > > - > > - size = c->buffer->end - c->buffer->last; > > - > > - if (size == 0) { > > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > - rc = NGX_STREAM_BAD_REQUEST; > > - break; > > - } > > + } > > > > - if (c->read->eof) { > > - rc = NGX_STREAM_OK; > > - break; > > - } > > - > > - if (!c->read->ready) { > > - break; > > - } > > - > > - n = c->recv(c, c->buffer->last, size); > > + if (ngx_stream_preread_can_peek(c)) { > > + rc = ngx_stream_preread_peek(s, ph); > > > > - if (n == NGX_ERROR || n == 0) { > > - rc = NGX_STREAM_OK; > > - break; > > - } > > + } else { > > + rc = ngx_stream_preread(s, ph); > > + } > > > > - if (n == NGX_AGAIN) { > > - break; > > - } > > - > > - c->buffer->last += n; > > - > > - rc = ph->handler(s); > > - } > > +done: > > > > if (rc == NGX_AGAIN) { > > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > > @@ -311,6 +291,129 @@ ngx_stream_core_preread_phase(ngx_stream > > } > > > > > > +static ngx_uint_t > > +ngx_stream_preread_can_peek(ngx_connection_t *c) > > +{ > > +#if (NGX_STREAM_SSL) > > + if (c->ssl) { > > + return 0; > > + } > > +#endif > > + > > + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { > > + return 0; > > + } > > + > > +#if (NGX_HAVE_KQUEUE) > > + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > > + return 1; > > + } > > +#endif > > + > > +#if (NGX_HAVE_EPOLLRDHUP) > > + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > > + return 1; > > + } > > +#endif > > + > > + return 0; > > +} > > + > > + > > +static ngx_int_t > > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > > +{ > > + ssize_t n; > > + ngx_int_t rc; > > + ngx_err_t err; > > + ngx_connection_t *c; > > + > > + c = s->connection; > > + > > + n = recv(c->fd, (char *) c->buffer->last, > > + c->buffer->end - 
c->buffer->last, MSG_PEEK); > > + > > + err = ngx_socket_errno; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); > > + > > + if (n == -1) { > > + if (err == NGX_EAGAIN) { > > + c->read->ready = 0; > > + return NGX_AGAIN; > > + } > > + > > + ngx_connection_error(c, err, "recv() failed"); > > + return NGX_STREAM_OK; > > + } > > + > > + if (n == 0) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last += n; > > + > > + rc = ph->handler(s); > > + > > + if (rc != NGX_AGAIN) { > > + c->buffer->last = c->buffer->pos; > > + return rc; > > + } > > + > > + if (c->buffer->last == c->buffer->end) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > + return NGX_STREAM_BAD_REQUEST; > > + } > > + > > + if (c->read->pending_eof) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last = c->buffer->pos; > > + > > + return NGX_AGAIN; > > +} > > + > > + > > +static ngx_int_t > > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > > +{ > > + ssize_t n; > > + ngx_int_t rc; > > + ngx_connection_t *c; > > + > > + c = s->connection; > > + > > + while (c->read->ready) { > > + > > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > > + > > + if (n == NGX_AGAIN) { > > + return NGX_AGAIN; > > + } > > + > > + if (n == NGX_ERROR || n == 0) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last += n; > > + > > + rc = ph->handler(s); > > + > > + if (rc != NGX_AGAIN) { > > + return rc; > > + } > > + > > + if (c->buffer->last == c->buffer->end) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > + return NGX_STREAM_BAD_REQUEST; > > + } > > + } > > + > > + return NGX_AGAIN; > > +} > > + > > + > > ngx_int_t > > ngx_stream_core_content_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph) > > Looks good. I'm somewhat sceptical about the idea of functionality which depends on the edge-triggered event methods being available. If/when the patch series is reviewed, please make sure it gets my approval before commit. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Fri Jan 19 11:42:37 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 19 Jan 2024 15:42:37 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> <20240118150606.tm776a4c6zajrieo@N00W24XTQX> <20240118161533.dtnqbdhyxiq36g4x@Y9MQ9X2QVV> Message-ID: > On 18 Jan 2024, at 23:44, Maxim Dounin wrote: > > Hello! > > On Thu, Jan 18, 2024 at 08:15:33PM +0400, Sergey Kandaurov wrote: > >> On Thu, Jan 18, 2024 at 07:06:06PM +0400, Roman Arutyunyan wrote: >>> Hi, >>> >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1702476295 -14400 >>> # Wed Dec 13 18:04:55 2023 +0400 >>> # Node ID 7324e8e73595c3093fcc2cbd2b5d6b1a947be3b0 >>> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 >>> Stream: socket peek in preread phase. >>> >>> Previously, preread buffer was always read out from socket, which made it >>> impossible to terminate SSL on the connection without introducing additional >>> SSL BIOs. The following patches will rely on this. >>> >>> Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. >>> It's called if SSL is not already terminated and if an egde-triggered event >>> method is used. For epoll, EPOLLRDHUP support is also required. 
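As a note on the edge-triggered requirement being debated here: with a level-triggered method, the bytes deliberately left in the socket by recv(MSG_PEEK) would keep the read event firing on every iteration of the event loop, while an edge-triggered method reports them only once; EPOLLRDHUP additionally lets a pending EOF be observed without draining the buffer. A rough sketch of such an epoll registration, illustrative only and not part of the patch:

    #include <sys/epoll.h>

    /* Register for edge-triggered read readiness plus peer-shutdown
     * notification; with level-triggered epoll, peeked-but-unread
     * data would retrigger this event on every epoll_wait() call. */
    static int
    watch_read_et(int epfd, int fd, void *data)
    {
        struct epoll_event  ee;

        ee.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        ee.data.ptr = data;

        return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ee);
    }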
>>> >>> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c >>> --- a/src/stream/ngx_stream_core_module.c >>> +++ b/src/stream/ngx_stream_core_module.c >>> @@ -10,6 +10,11 @@ >>> #include >>> >>> >>> +static ngx_uint_t ngx_stream_preread_can_peek(ngx_connection_t *c); >>> +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, >>> + ngx_stream_phase_handler_t *ph); >>> +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, >>> + ngx_stream_phase_handler_t *ph); >>> static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); >>> static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); >>> static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); >>> @@ -203,8 +208,6 @@ ngx_int_t >>> ngx_stream_core_preread_phase(ngx_stream_session_t *s, >>> ngx_stream_phase_handler_t *ph) >>> { >>> - size_t size; >>> - ssize_t n; >>> ngx_int_t rc; >>> ngx_connection_t *c; >>> ngx_stream_core_srv_conf_t *cscf; >>> @@ -217,56 +220,33 @@ ngx_stream_core_preread_phase(ngx_stream >>> >>> if (c->read->timedout) { >>> rc = NGX_STREAM_OK; >>> + goto done; >>> + } >>> >>> - } else if (c->read->timer_set) { >>> - rc = NGX_AGAIN; >>> + if (!c->read->timer_set) { >>> + rc = ph->handler(s); >>> >>> - } else { >>> - rc = ph->handler(s); >>> + if (rc != NGX_AGAIN) { >>> + goto done; >>> + } >>> } >>> >>> - while (rc == NGX_AGAIN) { >>> - >>> + if (c->buffer == NULL) { >>> + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); >>> if (c->buffer == NULL) { >>> - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); >>> - if (c->buffer == NULL) { >>> - rc = NGX_ERROR; >>> - break; >>> - } >>> + rc = NGX_ERROR; >>> + goto done; >>> } >>> - >>> - size = c->buffer->end - c->buffer->last; >>> - >>> - if (size == 0) { >>> - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); >>> - rc = NGX_STREAM_BAD_REQUEST; >>> - break; >>> - } >>> + } >>> >>> - if (c->read->eof) { >>> - rc = NGX_STREAM_OK; >>> - break; >>> - } >>> - >>> - if (!c->read->ready) { >>> - break; >>> - } >>> - >>> - n = c->recv(c, c->buffer->last, size); >>> + if (ngx_stream_preread_can_peek(c)) { >>> + rc = ngx_stream_preread_peek(s, ph); >>> >>> - if (n == NGX_ERROR || n == 0) { >>> - rc = NGX_STREAM_OK; >>> - break; >>> - } >>> + } else { >>> + rc = ngx_stream_preread(s, ph); >>> + } >>> >>> - if (n == NGX_AGAIN) { >>> - break; >>> - } >>> - >>> - c->buffer->last += n; >>> - >>> - rc = ph->handler(s); >>> - } >>> +done: >>> >>> if (rc == NGX_AGAIN) { >>> if (ngx_handle_read_event(c->read, 0) != NGX_OK) { >>> @@ -311,6 +291,129 @@ ngx_stream_core_preread_phase(ngx_stream >>> } >>> >>> >>> +static ngx_uint_t >>> +ngx_stream_preread_can_peek(ngx_connection_t *c) >>> +{ >>> +#if (NGX_STREAM_SSL) >>> + if (c->ssl) { >>> + return 0; >>> + } >>> +#endif >>> + >>> + if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { >>> + return 0; >>> + } >>> + >>> +#if (NGX_HAVE_KQUEUE) >>> + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { >>> + return 1; >>> + } >>> +#endif >>> + >>> +#if (NGX_HAVE_EPOLLRDHUP) >>> + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { >>> + return 1; >>> + } >>> +#endif >>> + >>> + return 0; >>> +} >>> + >>> + >>> +static ngx_int_t >>> +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) >>> +{ >>> + ssize_t n; >>> + ngx_int_t rc; >>> + ngx_err_t err; >>> + ngx_connection_t *c; >>> + >>> + c = s->connection; >>> + >>> + n = recv(c->fd, (char *) c->buffer->last, >>> + c->buffer->end - 
c->buffer->last, MSG_PEEK); >>> + >>> + err = ngx_socket_errno; >>> + >>> + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); >>> + >>> + if (n == -1) { >>> + if (err == NGX_EAGAIN) { >>> + c->read->ready = 0; >>> + return NGX_AGAIN; >>> + } >>> + >>> + ngx_connection_error(c, err, "recv() failed"); >>> + return NGX_STREAM_OK; >>> + } >>> + >>> + if (n == 0) { >>> + return NGX_STREAM_OK; >>> + } >>> + >>> + c->buffer->last += n; >>> + >>> + rc = ph->handler(s); >>> + >>> + if (rc != NGX_AGAIN) { >>> + c->buffer->last = c->buffer->pos; >>> + return rc; >>> + } >>> + >>> + if (c->buffer->last == c->buffer->end) { >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); >>> + return NGX_STREAM_BAD_REQUEST; >>> + } >>> + >>> + if (c->read->pending_eof) { >>> + return NGX_STREAM_OK; >>> + } >>> + >>> + c->buffer->last = c->buffer->pos; >>> + >>> + return NGX_AGAIN; >>> +} >>> + >>> + >>> +static ngx_int_t >>> +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) >>> +{ >>> + ssize_t n; >>> + ngx_int_t rc; >>> + ngx_connection_t *c; >>> + >>> + c = s->connection; >>> + >>> + while (c->read->ready) { >>> + >>> + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); >>> + >>> + if (n == NGX_AGAIN) { >>> + return NGX_AGAIN; >>> + } >>> + >>> + if (n == NGX_ERROR || n == 0) { >>> + return NGX_STREAM_OK; >>> + } >>> + >>> + c->buffer->last += n; >>> + >>> + rc = ph->handler(s); >>> + >>> + if (rc != NGX_AGAIN) { >>> + return rc; >>> + } >>> + >>> + if (c->buffer->last == c->buffer->end) { >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); >>> + return NGX_STREAM_BAD_REQUEST; >>> + } >>> + } >>> + >>> + return NGX_AGAIN; >>> +} >>> + >>> + >>> ngx_int_t >>> ngx_stream_core_content_phase(ngx_stream_session_t *s, >>> ngx_stream_phase_handler_t *ph) >> >> Looks good. > > I'm somewhat sceptical about the idea of functionality which > depends on the edge-triggered event methods being available. > We discussed this with Roman days back: https://mailman.nginx.org/pipermail/nginx-devel/2023-December/O6XC56D5BPSE6F45IZSRYEKOSGL3VVYB.html > If/when the patch series is reviewed, please make sure it gets my > approval before commit. > You're welcome to participate. -- Sergey Kandaurov From mdounin at mdounin.ru Fri Jan 19 20:26:37 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 19 Jan 2024 23:26:37 +0300 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> <20240104160327.q2cmayipp7ozrxs7@N00W24XTQX> <20240118150606.tm776a4c6zajrieo@N00W24XTQX> <20240118161533.dtnqbdhyxiq36g4x@Y9MQ9X2QVV> Message-ID: Hello! On Fri, Jan 19, 2024 at 03:42:37PM +0400, Sergey Kandaurov wrote: > > > On 18 Jan 2024, at 23:44, Maxim Dounin wrote: > > > > Hello! > > > > On Thu, Jan 18, 2024 at 08:15:33PM +0400, Sergey Kandaurov wrote: > > > >> On Thu, Jan 18, 2024 at 07:06:06PM +0400, Roman Arutyunyan wrote: > >>> Hi, > >>> > >>> # HG changeset patch > >>> # User Roman Arutyunyan > >>> # Date 1702476295 -14400 > >>> # Wed Dec 13 18:04:55 2023 +0400 > >>> # Node ID 7324e8e73595c3093fcc2cbd2b5d6b1a947be3b0 > >>> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > >>> Stream: socket peek in preread phase. 
> >>> > >>> Previously, preread buffer was always read out from socket, which made it > >>> impossible to terminate SSL on the connection without introducing additional > >>> SSL BIOs. The following patches will rely on this. > >>> > >>> Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > >>> It's called if SSL is not already terminated and if an egde-triggered event > >>> method is used. For epoll, EPOLLRDHUP support is also required. > >>> > >>> diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > >>> --- a/src/stream/ngx_stream_core_module.c > >>> +++ b/src/stream/ngx_stream_core_module.c > >>> @@ -10,6 +10,11 @@ > >>> #include > >>> > >>> > >>> +static ngx_uint_t ngx_stream_preread_can_peek(ngx_connection_t *c); > >>> +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > >>> + ngx_stream_phase_handler_t *ph); > >>> +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > >>> + ngx_stream_phase_handler_t *ph); > >>> static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > >>> static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > >>> static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > >>> @@ -203,8 +208,6 @@ ngx_int_t > >>> ngx_stream_core_preread_phase(ngx_stream_session_t *s, > >>> ngx_stream_phase_handler_t *ph) > >>> { > >>> - size_t size; > >>> - ssize_t n; > >>> ngx_int_t rc; > >>> ngx_connection_t *c; > >>> ngx_stream_core_srv_conf_t *cscf; > >>> @@ -217,56 +220,33 @@ ngx_stream_core_preread_phase(ngx_stream > >>> > >>> if (c->read->timedout) { > >>> rc = NGX_STREAM_OK; > >>> + goto done; > >>> + } > >>> > >>> - } else if (c->read->timer_set) { > >>> - rc = NGX_AGAIN; > >>> + if (!c->read->timer_set) { > >>> + rc = ph->handler(s); > >>> > >>> - } else { > >>> - rc = ph->handler(s); > >>> + if (rc != NGX_AGAIN) { > >>> + goto done; > >>> + } > >>> } > >>> > >>> - while (rc == NGX_AGAIN) { > >>> - > >>> + if (c->buffer == NULL) { > >>> + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > >>> if (c->buffer == NULL) { > >>> - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > >>> - if (c->buffer == NULL) { > >>> - rc = NGX_ERROR; > >>> - break; > >>> - } > >>> + rc = NGX_ERROR; > >>> + goto done; > >>> } > >>> - > >>> - size = c->buffer->end - c->buffer->last; > >>> - > >>> - if (size == 0) { > >>> - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > >>> - rc = NGX_STREAM_BAD_REQUEST; > >>> - break; > >>> - } > >>> + } > >>> > >>> - if (c->read->eof) { > >>> - rc = NGX_STREAM_OK; > >>> - break; > >>> - } > >>> - > >>> - if (!c->read->ready) { > >>> - break; > >>> - } > >>> - > >>> - n = c->recv(c, c->buffer->last, size); > >>> + if (ngx_stream_preread_can_peek(c)) { > >>> + rc = ngx_stream_preread_peek(s, ph); > >>> > >>> - if (n == NGX_ERROR || n == 0) { > >>> - rc = NGX_STREAM_OK; > >>> - break; > >>> - } > >>> + } else { > >>> + rc = ngx_stream_preread(s, ph); > >>> + } > >>> > >>> - if (n == NGX_AGAIN) { > >>> - break; > >>> - } > >>> - > >>> - c->buffer->last += n; > >>> - > >>> - rc = ph->handler(s); > >>> - } > >>> +done: > >>> > >>> if (rc == NGX_AGAIN) { > >>> if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > >>> @@ -311,6 +291,129 @@ ngx_stream_core_preread_phase(ngx_stream > >>> } > >>> > >>> > >>> +static ngx_uint_t > >>> +ngx_stream_preread_can_peek(ngx_connection_t *c) > >>> +{ > >>> +#if (NGX_STREAM_SSL) > >>> + if (c->ssl) { > >>> + return 0; > >>> + } > >>> +#endif > >>> + > >>> + 
if ((ngx_event_flags & NGX_USE_CLEAR_EVENT) == 0) { > >>> + return 0; > >>> + } > >>> + > >>> +#if (NGX_HAVE_KQUEUE) > >>> + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { > >>> + return 1; > >>> + } > >>> +#endif > >>> + > >>> +#if (NGX_HAVE_EPOLLRDHUP) > >>> + if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { > >>> + return 1; > >>> + } > >>> +#endif > >>> + > >>> + return 0; > >>> +} > >>> + > >>> + > >>> +static ngx_int_t > >>> +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > >>> +{ > >>> + ssize_t n; > >>> + ngx_int_t rc; > >>> + ngx_err_t err; > >>> + ngx_connection_t *c; > >>> + > >>> + c = s->connection; > >>> + > >>> + n = recv(c->fd, (char *) c->buffer->last, > >>> + c->buffer->end - c->buffer->last, MSG_PEEK); > >>> + > >>> + err = ngx_socket_errno; > >>> + > >>> + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); > >>> + > >>> + if (n == -1) { > >>> + if (err == NGX_EAGAIN) { > >>> + c->read->ready = 0; > >>> + return NGX_AGAIN; > >>> + } > >>> + > >>> + ngx_connection_error(c, err, "recv() failed"); > >>> + return NGX_STREAM_OK; > >>> + } > >>> + > >>> + if (n == 0) { > >>> + return NGX_STREAM_OK; > >>> + } > >>> + > >>> + c->buffer->last += n; > >>> + > >>> + rc = ph->handler(s); > >>> + > >>> + if (rc != NGX_AGAIN) { > >>> + c->buffer->last = c->buffer->pos; > >>> + return rc; > >>> + } > >>> + > >>> + if (c->buffer->last == c->buffer->end) { > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > >>> + return NGX_STREAM_BAD_REQUEST; > >>> + } > >>> + > >>> + if (c->read->pending_eof) { > >>> + return NGX_STREAM_OK; > >>> + } > >>> + > >>> + c->buffer->last = c->buffer->pos; > >>> + > >>> + return NGX_AGAIN; > >>> +} > >>> + > >>> + > >>> +static ngx_int_t > >>> +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > >>> +{ > >>> + ssize_t n; > >>> + ngx_int_t rc; > >>> + ngx_connection_t *c; > >>> + > >>> + c = s->connection; > >>> + > >>> + while (c->read->ready) { > >>> + > >>> + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > >>> + > >>> + if (n == NGX_AGAIN) { > >>> + return NGX_AGAIN; > >>> + } > >>> + > >>> + if (n == NGX_ERROR || n == 0) { > >>> + return NGX_STREAM_OK; > >>> + } > >>> + > >>> + c->buffer->last += n; > >>> + > >>> + rc = ph->handler(s); > >>> + > >>> + if (rc != NGX_AGAIN) { > >>> + return rc; > >>> + } > >>> + > >>> + if (c->buffer->last == c->buffer->end) { > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > >>> + return NGX_STREAM_BAD_REQUEST; > >>> + } > >>> + } > >>> + > >>> + return NGX_AGAIN; > >>> +} > >>> + > >>> + > >>> ngx_int_t > >>> ngx_stream_core_content_phase(ngx_stream_session_t *s, > >>> ngx_stream_phase_handler_t *ph) > >> > >> Looks good. > > > > I'm somewhat sceptical about the idea of functionality which > > depends on the edge-triggered event methods being available. > > > > We discussed this with Roman days back: > https://mailman.nginx.org/pipermail/nginx-devel/2023-December/O6XC56D5BPSE6F45IZSRYEKOSGL3VVYB.html Yes, I've seen the discussion. I'm not convinced that proposed approach is a good solution though, and would like to re-visit this after the initial review. > > If/when the patch series is reviewed, please make sure it gets my > > approval before commit. > > > > You're welcome to participate. Sure. 
-- 
Maxim Dounin
http://mdounin.ru/

From xeioex at nginx.com  Sat Jan 20 02:16:47 2024
From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=)
Date: Sat, 20 Jan 2024 02:16:47 +0000
Subject: [njs] Fixed tracking of unhandled rejected promises.
Message-ID: 

details:   https://hg.nginx.org/njs/rev/da8b044e1c61
branches:  
changeset: 2268:da8b044e1c61
user:      Dmitry Volyntsev
date:      Thu Jan 18 18:03:24 2024 -0800
description:
Fixed tracking of unhandled rejected promises.

Checking for unhandled promise rejections while looping for pending jobs
produces false-positive reports when a rejected promise is handled by one of
the pending jobs later.

The fix is to check for unhandled promise rejections only at top-level calls
like ngx_js_name_invoke() and ngx_js_name_call() and only after all pending
jobs are processed.

The issue was introduced in bc80bcb3102c (not released yet).

diffstat:

 external/njs_shell.c          |  10 +++++++++-
 nginx/ngx_js.c                |  11 +++++++++--
 src/test/njs_externals_test.c |   2 +-
 src/test/njs_unit_test.c      |   4 +---

 4 files changed, 20 insertions(+), 7 deletions(-)

diffs (88 lines):

diff -r 4fba78789fe4 -r da8b044e1c61 external/njs_shell.c
--- a/external/njs_shell.c	Thu Jan 11 15:13:47 2024 -0800
+++ b/external/njs_shell.c	Thu Jan 18 18:03:24 2024 -0800
@@ -1100,7 +1100,7 @@ njs_process_script(njs_vm_t *vm, void *r
     for ( ;; ) {
         ret = njs_vm_execute_pending_job(vm);
         if (ret <= NJS_OK) {
-            if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) {
+            if (ret == NJS_ERROR) {
                 njs_process_output(vm, NULL, ret);
 
                 if (!njs_vm_options(vm)->interactive) {
@@ -1112,6 +1112,14 @@ njs_process_script(njs_vm_t *vm, void *r
         }
     }
 
+    if (njs_vm_unhandled_rejection(vm)) {
+        njs_process_output(vm, NULL, NJS_ERROR);
+
+        if (!njs_vm_options(vm)->interactive) {
+            return NJS_ERROR;
+        }
+    }
+
     ret = njs_process_events(runtime);
     if (njs_slow_path(ret == NJS_ERROR)) {
         break;
diff -r 4fba78789fe4 -r da8b044e1c61 nginx/ngx_js.c
--- a/nginx/ngx_js.c	Thu Jan 11 15:13:47 2024 -0800
+++ b/nginx/ngx_js.c	Thu Jan 18 18:03:24 2024 -0800
@@ -357,7 +357,7 @@ ngx_js_call(njs_vm_t *vm, njs_function_t
         if (ret <= NJS_OK) {
             c = ngx_external_connection(vm, njs_vm_external_ptr(vm));
 
-            if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) {
+            if (ret == NJS_ERROR) {
                 ngx_js_exception(vm, &exception);
 
                 ngx_log_error(NGX_LOG_ERR, c->log, 0,
@@ -417,7 +417,7 @@ ngx_js_name_invoke(njs_vm_t *vm, ngx_str
     for ( ;; ) {
         ret = njs_vm_execute_pending_job(vm);
         if (ret <= NJS_OK) {
-            if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) {
+            if (ret == NJS_ERROR) {
                 ngx_js_exception(vm, &exception);
 
                 ngx_log_error(NGX_LOG_ERR, log, 0,
@@ -429,6 +429,13 @@ ngx_js_name_invoke(njs_vm_t *vm, ngx_str
         }
     }
 
+    if (njs_vm_unhandled_rejection(vm)) {
+        ngx_js_exception(vm, &exception);
+
+        ngx_log_error(NGX_LOG_ERR, log, 0, "js exception: %V", &exception);
+        return NGX_ERROR;
+    }
+
     ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm));
 
     return njs_rbtree_is_empty(&ctx->waiting_events) ?
NGX_OK : NGX_AGAIN; diff -r 4fba78789fe4 -r da8b044e1c61 src/test/njs_externals_test.c --- a/src/test/njs_externals_test.c Thu Jan 11 15:13:47 2024 -0800 +++ b/src/test/njs_externals_test.c Thu Jan 18 18:03:24 2024 -0800 @@ -1485,7 +1485,7 @@ njs_external_call(njs_vm_t *vm, const nj for ( ;; ) { ret = njs_vm_execute_pending_job(vm); if (ret <= NJS_OK) { - if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) { + if (ret == NJS_ERROR) { return NJS_ERROR; } diff -r 4fba78789fe4 -r da8b044e1c61 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Jan 11 15:13:47 2024 -0800 +++ b/src/test/njs_unit_test.c Thu Jan 18 18:03:24 2024 -0800 @@ -23708,9 +23708,7 @@ njs_process_test(njs_external_state_t *s for ( ;; ) { ret = njs_vm_execute_pending_job(state->vm); if (ret <= NJS_OK) { - if (ret == NJS_ERROR - || njs_vm_unhandled_rejection(state->vm)) - { + if (ret == NJS_ERROR) { return NJS_ERROR; } From xeioex at nginx.com Sat Jan 20 02:16:49 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Sat, 20 Jan 2024 02:16:49 +0000 Subject: [njs] Moving out HostPromiseRejectionTracker from njs core. Message-ID: details: https://hg.nginx.org/njs/rev/8aad26845b18 branches: changeset: 2269:8aad26845b18 user: Dmitry Volyntsev date: Thu Jan 18 18:03:35 2024 -0800 description: Moving out HostPromiseRejectionTracker from njs core. HostPromiseRejectionTracker should be implemented by host environment according to ECMAScript specs. The following method was removed: njs_vm_unhandled_rejection(). The following method was introduced: njs_vm_set_rejection_tracker(). diffstat: external/njs_shell.c | 103 +++++++++++++++++++++++++++++++++++++++++- nginx/ngx_http_js_module.c | 1 - nginx/ngx_js.c | 101 +++++++++++++++++++++++++++++++++++++++-- nginx/ngx_js.h | 2 + nginx/ngx_stream_js_module.c | 1 - src/njs.h | 15 ++--- src/njs_promise.c | 74 ++++-------------------------- src/njs_value.c | 7 ++ src/njs_vm.c | 40 +++------------ src/njs_vm.h | 4 +- 10 files changed, 230 insertions(+), 118 deletions(-) diffs (651 lines): diff -r da8b044e1c61 -r 8aad26845b18 external/njs_shell.c --- a/external/njs_shell.c Thu Jan 18 18:03:24 2024 -0800 +++ b/external/njs_shell.c Thu Jan 18 18:03:35 2024 -0800 @@ -93,6 +93,12 @@ typedef struct { typedef struct { + void *promise; + njs_opaque_value_t message; +} njs_rejected_promise_t; + + +typedef struct { njs_vm_t *vm; uint32_t event_id; @@ -101,6 +107,8 @@ typedef struct { njs_queue_t labels; + njs_arr_t *rejected_promises; + njs_bool_t suppress_stdout; njs_completion_t completion; @@ -422,7 +430,7 @@ njs_options_parse(njs_opts_t *opts, int opts->denormals = 1; opts->exit_code = EXIT_FAILURE; - opts->unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; + opts->unhandled_rejection = 1; p = getenv("NJS_EXIT_CODE"); if (p != NULL) { @@ -528,7 +536,7 @@ njs_options_parse(njs_opts_t *opts, int break; case 'r': - opts->unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE; + opts->unhandled_rejection = 0; break; case 's': @@ -636,6 +644,8 @@ njs_console_init(njs_vm_t *vm, njs_conso njs_queue_init(&console->posted_events); njs_queue_init(&console->labels); + console->rejected_promises = NULL; + console->completion.completions = njs_vm_completions(vm, NULL); if (console->completion.completions == NULL) { return NJS_ERROR; @@ -749,6 +759,53 @@ njs_externals_init(njs_vm_t *vm) } +static void +njs_rejection_tracker(njs_vm_t *vm, njs_external_ptr_t external, + njs_bool_t is_handled, njs_value_t *promise, njs_value_t *reason) +{ + void *promise_obj; + uint32_t i, 
length; + njs_console_t *console; + njs_rejected_promise_t *rejected_promise; + + console = external; + + if (is_handled && console->rejected_promises != NULL) { + rejected_promise = console->rejected_promises->start; + length = console->rejected_promises->items; + + promise_obj = njs_value_ptr(promise); + + for (i = 0; i < length; i++) { + if (rejected_promise[i].promise == promise_obj) { + njs_arr_remove(console->rejected_promises, + &rejected_promise[i]); + + break; + } + } + + return; + } + + if (console->rejected_promises == NULL) { + console->rejected_promises = njs_arr_create(njs_vm_memory_pool(vm), 4, + sizeof(njs_rejected_promise_t)); + if (njs_slow_path(console->rejected_promises == NULL)) { + return; + } + } + + rejected_promise = njs_arr_add(console->rejected_promises); + if (njs_slow_path(rejected_promise == NULL)) { + return; + } + + rejected_promise->promise = njs_value_ptr(promise); + njs_value_assign(&rejected_promise->message, reason); +} + + static njs_vm_t * njs_create_vm(njs_opts_t *opts) { @@ -784,7 +841,6 @@ njs_create_vm(njs_opts_t *opts) vm_options.argv = opts->argv; vm_options.argc = opts->argc; vm_options.ast = opts->ast; - vm_options.unhandled_rejection = opts->unhandled_rejection; if (opts->stack_size != 0) { vm_options.max_stack_size = opts->stack_size; @@ -796,6 +852,11 @@ njs_create_vm(njs_opts_t *opts) return NULL; } + if (opts->unhandled_rejection) { + njs_vm_set_rejection_tracker(vm, njs_rejection_tracker, + njs_vm_external_ptr(vm)); + } + for (i = 0; i < opts->n_paths; i++) { path.start = (u_char *) opts->paths[i]; path.length = njs_strlen(opts->paths[i]); @@ -914,6 +975,40 @@ njs_process_events(void *runtime) static njs_int_t +njs_unhandled_rejection(void *runtime) +{ + njs_int_t ret; + njs_str_t message; + njs_console_t *console; + njs_rejected_promise_t *rejected_promise; + + console = runtime; + + if (console->rejected_promises == NULL + || console->rejected_promises->items == 0) + { + return 0; + } + + rejected_promise = console->rejected_promises->start; + + ret = njs_vm_value_to_string(console->vm, &message, + njs_value_arg(&rejected_promise->message)); + if (njs_slow_path(ret != NJS_OK)) { + return -1; + } + + njs_vm_error(console->vm, "unhandled promise rejection: %V", + &message); + + njs_arr_destroy(console->rejected_promises); + console->rejected_promises = NULL; + + return 1; +} + + +static njs_int_t njs_read_file(njs_opts_t *opts, njs_str_t *content) { int fd; @@ -1112,7 +1207,7 @@ njs_process_script(njs_vm_t *vm, void *r } } - if (njs_vm_unhandled_rejection(vm)) { + if (njs_unhandled_rejection(runtime)) { njs_process_output(vm, NULL, NJS_ERROR); if (!njs_vm_options(vm)->interactive) { diff -r da8b044e1c61 -r 8aad26845b18 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Thu Jan 18 18:03:24 2024 -0800 +++ b/nginx/ngx_http_js_module.c Thu Jan 18 18:03:35 2024 -0800 @@ -4505,7 +4505,6 @@ ngx_http_js_init_conf_vm(ngx_conf_t *cf, ngx_http_js_uptr[NGX_JS_MAIN_CONF_INDEX] = (uintptr_t) jmcf; options.backtrace = 1; - options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.metas = &ngx_http_js_metas; options.addons = njs_http_js_addon_modules; options.argv = ngx_argv; diff -r da8b044e1c61 -r 8aad26845b18 nginx/ngx_js.c --- a/nginx/ngx_js.c Thu Jan 18 18:03:24 2024 -0800 +++ b/nginx/ngx_js.c Thu Jan 18 18:03:35 2024 -0800 @@ -12,17 +12,23 @@ typedef struct { - ngx_queue_t labels; + ngx_queue_t labels; } ngx_js_console_t; typedef struct { - njs_str_t name; - uint64_t time; - ngx_queue_t queue; + njs_str_t name; + 
uint64_t time; + ngx_queue_t queue; } ngx_js_timelabel_t; +typedef struct { + void *promise; + njs_opaque_value_t message; +} ngx_js_rejected_promise_t; + + static njs_int_t ngx_js_ext_build(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); static njs_int_t ngx_js_ext_conf_file_path(njs_vm_t *vm, @@ -49,6 +55,7 @@ static njs_int_t njs_set_immediate(njs_v njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); static njs_int_t njs_clear_timeout(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); +static njs_int_t ngx_js_unhandled_rejection(ngx_js_ctx_t *ctx); static void ngx_js_cleanup_vm(void *data); static njs_int_t ngx_js_core_init(njs_vm_t *vm); @@ -429,15 +436,15 @@ ngx_js_name_invoke(njs_vm_t *vm, ngx_str } } - if (njs_vm_unhandled_rejection(vm)) { + ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); + + if (ngx_js_unhandled_rejection(ctx)) { ngx_js_exception(vm, &exception); ngx_log_error(NGX_LOG_ERR, log, 0, "js exception: %V", &exception); return NGX_ERROR; } - ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); - return njs_rbtree_is_empty(&ctx->waiting_events) ? NGX_OK : NGX_AGAIN; } @@ -1661,6 +1668,53 @@ ngx_js_merge_vm(ngx_conf_t *cf, ngx_js_l } +static void +ngx_js_rejection_tracker(njs_vm_t *vm, njs_external_ptr_t unused, + njs_bool_t is_handled, njs_value_t *promise, njs_value_t *reason) +{ + void *promise_obj; + uint32_t i, length; + ngx_js_ctx_t *ctx; + ngx_js_rejected_promise_t *rejected_promise; + + ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); + + if (is_handled && ctx->rejected_promises != NULL) { + rejected_promise = ctx->rejected_promises->start; + length = ctx->rejected_promises->items; + + promise_obj = njs_value_ptr(promise); + + for (i = 0; i < length; i++) { + if (rejected_promise[i].promise == promise_obj) { + njs_arr_remove(ctx->rejected_promises, + &rejected_promise[i]); + + break; + } + } + + return; + } + + if (ctx->rejected_promises == NULL) { + ctx->rejected_promises = njs_arr_create(njs_vm_memory_pool(vm), 4, + sizeof(ngx_js_rejected_promise_t)); + if (njs_slow_path(ctx->rejected_promises == NULL)) { + return; + } + } + + rejected_promise = njs_arr_add(ctx->rejected_promises); + if (njs_slow_path(rejected_promise == NULL)) { + return; + } + + rejected_promise->promise = njs_value_ptr(promise); + njs_value_assign(&rejected_promise->message, reason); +} + + ngx_int_t ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_js_loc_conf_t *conf, njs_vm_opt_t *options) @@ -1738,6 +1792,9 @@ ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_ cln->handler = ngx_js_cleanup_vm; cln->data = conf; + njs_vm_set_rejection_tracker(conf->vm, ngx_js_rejection_tracker, + NULL); + path.start = ngx_cycle->conf_prefix.data; path.length = ngx_cycle->conf_prefix.len; @@ -1810,6 +1867,36 @@ ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_ } +static njs_int_t +ngx_js_unhandled_rejection(ngx_js_ctx_t *ctx) +{ + njs_int_t ret; + njs_str_t message; + ngx_js_rejected_promise_t *rejected_promise; + + if (ctx->rejected_promises == NULL + || ctx->rejected_promises->items == 0) + { + return 0; + } + + rejected_promise = ctx->rejected_promises->start; + + ret = njs_vm_value_to_string(ctx->vm, &message, + njs_value_arg(&rejected_promise->message)); + if (njs_slow_path(ret != NJS_OK)) { + return -1; + } + + njs_vm_error(ctx->vm, "unhandled promise rejection: %V", &message); + + njs_arr_destroy(ctx->rejected_promises); + ctx->rejected_promises = NULL; + + return 1; +} + + static void ngx_js_cleanup_vm(void *data) { 
diff -r da8b044e1c61 -r 8aad26845b18 nginx/ngx_js.h --- a/nginx/ngx_js.h Thu Jan 18 18:03:24 2024 -0800 +++ b/nginx/ngx_js.h Thu Jan 18 18:03:35 2024 -0800 @@ -15,6 +15,7 @@ #include #include #include +#include #include "ngx_js_fetch.h" #include "ngx_js_shared_dict.h" @@ -111,6 +112,7 @@ struct ngx_js_event_s { #define NGX_JS_COMMON_CTX \ njs_vm_t *vm; \ + njs_arr_t *rejected_promises; \ njs_rbtree_t waiting_events; \ ngx_socket_t event_id diff -r da8b044e1c61 -r 8aad26845b18 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Thu Jan 18 18:03:24 2024 -0800 +++ b/nginx/ngx_stream_js_module.c Thu Jan 18 18:03:35 2024 -0800 @@ -1778,7 +1778,6 @@ ngx_stream_js_init_conf_vm(ngx_conf_t *c ngx_stream_js_uptr[NGX_JS_MAIN_CONF_INDEX] = (uintptr_t) jmcf; options.backtrace = 1; - options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.metas = &ngx_stream_js_metas; options.addons = njs_stream_js_addon_modules; options.argv = ngx_argv; diff -r da8b044e1c61 -r 8aad26845b18 src/njs.h --- a/src/njs.h Thu Jan 18 18:03:24 2024 -0800 +++ b/src/njs.h Thu Jan 18 18:03:35 2024 -0800 @@ -196,6 +196,9 @@ typedef void * njs_ typedef njs_mod_t *(*njs_module_loader_t)(njs_vm_t *vm, njs_external_ptr_t external, njs_str_t *name); +typedef void (*njs_rejection_tracker_t)(njs_vm_t *vm, + njs_external_ptr_t external, njs_bool_t is_handled, njs_value_t *promise, + njs_value_t *reason); typedef struct { @@ -225,9 +228,6 @@ typedef struct { njs_uint_t max_stack_size; -#define NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE 0 -#define NJS_VM_OPT_UNHANDLED_REJECTION_THROW 1 - /* * interactive - enables "interactive" mode. * (REPL). Allows starting parent VM without cloning. @@ -240,9 +240,6 @@ typedef struct { * - Function constructors. * module - ES6 "module" mode. Script mode is default. * ast - print AST. - * unhandled_rejection IGNORE | THROW - tracks unhandled promise rejections: - * - throwing inside a Promise without a catch block. - * - throwing inside in a finally or catch block. 
*/ uint8_t interactive; /* 1 bit */ uint8_t trailer; /* 1 bit */ @@ -260,7 +257,6 @@ typedef struct { #ifdef NJS_DEBUG_GENERATOR uint8_t generator_debug; /* 1 bit */ #endif - uint8_t unhandled_rejection; } njs_vm_opt_t; @@ -304,7 +300,9 @@ NJS_EXPORT njs_int_t njs_vm_enqueue_job( */ NJS_EXPORT njs_int_t njs_vm_execute_pending_job(njs_vm_t *vm); NJS_EXPORT njs_int_t njs_vm_pending(njs_vm_t *vm); -NJS_EXPORT njs_int_t njs_vm_unhandled_rejection(njs_vm_t *vm); + +NJS_EXPORT void njs_vm_set_rejection_tracker(njs_vm_t *vm, + njs_rejection_tracker_t rejection_tracker, void *opaque); NJS_EXPORT void *njs_vm_completions(njs_vm_t *vm, njs_str_t *expression); @@ -468,6 +466,7 @@ NJS_EXPORT double njs_value_number(const NJS_EXPORT njs_function_t *njs_value_function(const njs_value_t *value); NJS_EXPORT njs_function_native_t njs_value_native_function( const njs_value_t *value); +NJS_EXPORT void *njs_value_ptr(const njs_value_t *value); njs_external_ptr_t njs_value_external(const njs_value_t *value); NJS_EXPORT njs_int_t njs_value_external_tag(const njs_value_t *value); diff -r da8b044e1c61 -r 8aad26845b18 src/njs_promise.c --- a/src/njs_promise.c Thu Jan 18 18:03:24 2024 -0800 +++ b/src/njs_promise.c Thu Jan 18 18:03:35 2024 -0800 @@ -61,8 +61,6 @@ static njs_int_t njs_promise_value_const static njs_int_t njs_promise_capability_executor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); -static njs_int_t njs_promise_host_rejection_tracker(njs_vm_t *vm, - njs_promise_t *promise, njs_promise_rejection_type_t operation); static njs_int_t njs_promise_resolve_function(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); static njs_int_t njs_promise_reject_function(njs_vm_t *vm, njs_value_t *args, @@ -513,8 +511,8 @@ njs_promise_fulfill(njs_vm_t *vm, njs_pr njs_inline njs_value_t * njs_promise_reject(njs_vm_t *vm, njs_promise_t *promise, njs_value_t *reason) { - njs_int_t ret; njs_queue_t queue; + njs_value_t promise_value; njs_promise_data_t *data; data = njs_data(&promise->value); @@ -523,10 +521,10 @@ njs_promise_reject(njs_vm_t *vm, njs_pro data->state = NJS_PROMISE_REJECTED; if (!data->is_handled) { - ret = njs_promise_host_rejection_tracker(vm, promise, - NJS_PROMISE_REJECT); - if (njs_slow_path(ret != NJS_OK)) { - return njs_value_arg(&njs_value_null); + if (vm->rejection_tracker != NULL) { + njs_set_promise(&promise_value, promise); + vm->rejection_tracker(vm, vm->rejection_tracker_opaque, 0, + &promise_value, reason); } } @@ -548,58 +546,6 @@ njs_promise_reject(njs_vm_t *vm, njs_pro static njs_int_t -njs_promise_host_rejection_tracker(njs_vm_t *vm, njs_promise_t *promise, - njs_promise_rejection_type_t operation) -{ - uint32_t i, length; - njs_value_t *value; - njs_promise_data_t *data; - - if (vm->options.unhandled_rejection - == NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE) - { - return NJS_OK; - } - - if (vm->promise_reason == NULL) { - vm->promise_reason = njs_array_alloc(vm, 1, 0, NJS_ARRAY_SPARE); - if (njs_slow_path(vm->promise_reason == NULL)) { - return NJS_ERROR; - } - } - - data = njs_data(&promise->value); - - if (operation == NJS_PROMISE_REJECT) { - if (vm->promise_reason != NULL) { - return njs_array_add(vm, vm->promise_reason, &data->result); - } - - } else { - value = vm->promise_reason->start; - length = vm->promise_reason->length; - - for (i = 0; i < length; i++) { - if (njs_values_same(&value[i], &data->result)) { - length--; - - if (i < length) { - memmove(&value[i], &value[i + 1], - sizeof(njs_value_t) * 
(length - i)); - } - - break; - } - } - - vm->promise_reason->length = length; - } - - return NJS_OK; -} - - -static njs_int_t njs_promise_invoke_then(njs_vm_t *vm, njs_value_t *promise, njs_value_t *args, njs_int_t nargs, njs_value_t *retval) { @@ -896,7 +842,7 @@ njs_promise_perform_then(njs_vm_t *vm, n njs_promise_capability_t *capability, njs_value_t *retval) { njs_int_t ret; - njs_value_t arguments[2]; + njs_value_t arguments[2], promise_value; njs_promise_t *promise; njs_function_t *function; njs_promise_data_t *data; @@ -949,10 +895,10 @@ njs_promise_perform_then(njs_vm_t *vm, n if (data->state == NJS_PROMISE_REJECTED) { njs_set_data(&arguments[0], rejected_reaction, 0); - ret = njs_promise_host_rejection_tracker(vm, promise, - NJS_PROMISE_HANDLE); - if (njs_slow_path(ret != NJS_OK)) { - return ret; + if (vm->rejection_tracker != NULL) { + njs_set_promise(&promise_value, promise); + vm->rejection_tracker(vm, vm->rejection_tracker_opaque, 1, + &promise_value, &data->result); } } else { diff -r da8b044e1c61 -r 8aad26845b18 src/njs_value.c --- a/src/njs_value.c Thu Jan 18 18:03:24 2024 -0800 +++ b/src/njs_value.c Thu Jan 18 18:03:35 2024 -0800 @@ -457,6 +457,13 @@ njs_value_native_function(const njs_valu } +void * +njs_value_ptr(const njs_value_t *value) +{ + return njs_data(value); +} + + njs_external_ptr_t njs_value_external(const njs_value_t *value) { diff -r da8b044e1c61 -r 8aad26845b18 src/njs_vm.c --- a/src/njs_vm.c Thu Jan 18 18:03:24 2024 -0800 +++ b/src/njs_vm.c Thu Jan 18 18:03:35 2024 -0800 @@ -627,37 +627,6 @@ njs_vm_pending(njs_vm_t *vm) njs_int_t -njs_vm_unhandled_rejection(njs_vm_t *vm) -{ - njs_int_t ret; - njs_str_t str; - njs_value_t string; - - if (!(vm->options.unhandled_rejection - == NJS_VM_OPT_UNHANDLED_REJECTION_THROW - && vm->promise_reason != NULL - && vm->promise_reason->length != 0)) - { - return 0; - } - - njs_value_assign(&string, &vm->promise_reason->start[0]); - ret = njs_value_to_string(vm, &string, &string); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - njs_string_get(&string, &str); - njs_vm_error(vm, "unhandled promise rejection: %V", &str); - - njs_mp_free(vm->mem_pool, vm->promise_reason); - vm->promise_reason = NULL; - - return 1; -} - - -njs_int_t njs_vm_enqueue_job(njs_vm_t *vm, njs_function_t *function, const njs_value_t *args, njs_uint_t nargs) { @@ -738,6 +707,15 @@ njs_vm_set_module_loader(njs_vm_t *vm, n } +void +njs_vm_set_rejection_tracker(njs_vm_t *vm, + njs_rejection_tracker_t rejection_tracker, void *opaque) +{ + vm->rejection_tracker = rejection_tracker; + vm->rejection_tracker_opaque = opaque; +} + + njs_int_t njs_vm_add_path(njs_vm_t *vm, const njs_str_t *path) { diff -r da8b044e1c61 -r 8aad26845b18 src/njs_vm.h --- a/src/njs_vm.h Thu Jan 18 18:03:24 2024 -0800 +++ b/src/njs_vm.h Thu Jan 18 18:03:35 2024 -0800 @@ -160,8 +160,6 @@ struct njs_vm_s { njs_regex_compile_ctx_t *regex_compile_ctx; njs_regex_match_data_t *single_match_data; - njs_array_t *promise_reason; - njs_parser_scope_t *global_scope; /* @@ -185,6 +183,8 @@ struct njs_vm_s { njs_module_loader_t module_loader; void *module_loader_opaque; + njs_rejection_tracker_t rejection_tracker; + void *rejection_tracker_opaque; }; From jordanc.carter at outlook.com Sun Jan 21 10:37:24 2024 From: jordanc.carter at outlook.com (=?iso-8859-1?q?J_Carter?=) Date: Sun, 21 Jan 2024 10:37:24 +0000 Subject: [PATCH] SSL: Added SSLKEYLOGFILE key material to debug logging Message-ID: # HG changeset patch # User J Carter # Date 1705832811 0 # Sun Jan 21 10:26:51 2024 +0000 # Node 
ID b00332a5253eefb53bacc024c72f55876c2eac6e
# Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
SSL: Added SSLKEYLOGFILE key material to debug logging.

This patch also introduces the debug_keylog error log level flag, which
may be used to granularly enable or omit logging of key material via
error level flags (note: it is always enabled when using
debug_connection).

Each line of key material is output to the error log as a separate log
message, and is prepended with 'ssl keylog: ' for convenient extraction.

The purpose of logging key material is to allow external tools, such as
wireshark/tshark, to decrypt captured TLS connections in all situations.

Previously, only TLS 1.2 (and below) connections could be decrypted when
specific cipher suites were used, and when the decrypter had access to
the acting server's TLS certificates and keys. It was not possible to
decrypt TLS 1.3 traffic without generating SSLKEYLOGFILE on the peer, or
by using other hacks on the nginx host (using GDB, or patched ssl
libraries).

Decrypting inbound and outbound TLS connections is useful when debugging
for a few reasons:

1) Nginx does not have a convenient mechanism for logging the response
body sent to the client, or the response body received from the upstream
while proxying. Packet captures provide a convenient mechanism for
viewing this traffic. This same use-case applies to the client request
body in various scenarios where it cannot be logged using $request_body.

2) It is often convenient to use wireshark to dissect non-http traffic
proxied via the stream module. This will now work in all scenarios,
including when the stream module is used to perform TLS offloading and
then proxies to the upstream over TLS.

3) Many post-handshake TLS issues are better diagnosed through analysis
of decrypted TLS traffic rather than openssl's often ambiguous
alert/error messages.

4) It's a convenient way to debug third-party modules that initiate or
accept TLS connections (provided they utilize nginx's native networking
facilities).

Example usage:

error_log /var/log/nginx/error.log debug;

or

error_log /var/log/nginx/error.log debug_keylog;

Live extraction:

tail -f -n0 /var/log/nginx/error.log |\
grep -Poa --line-buffered '(?<=ssl keylog: ).*' |\
tee -a /tmp/keylog.log

Wireshark:

1) Navigate 'Edit -> Preferences -> Protocols -> TLS'.
2) Set '(Pre)-Master-Secret log filename' to '/tmp/keylog.log'.
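For reference, after the live extraction step the keylog file contains
standard NSS key log format lines as emitted by the SSL library. For a
TLS 1.3 handshake with OpenSSL that is one line per secret (the values
below are placeholders, not real material):

CLIENT_HANDSHAKE_TRAFFIC_SECRET <client_random> <secret>
SERVER_HANDSHAKE_TRAFFIC_SECRET <client_random> <secret>
CLIENT_TRAFFIC_SECRET_0 <client_random> <secret>
SERVER_TRAFFIC_SECRET_0 <client_random> <secret>
EXPORTER_SECRET <client_random> <secret>

TLS 1.2 and below produce a single CLIENT_RANDOM line per connection
instead.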
diff -r ee40e2b1d083 -r b00332a5253e src/core/ngx_log.c --- a/src/core/ngx_log.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/core/ngx_log.c Sun Jan 21 10:26:51 2024 +0000 @@ -86,7 +86,7 @@ static const char *debug_levels[] = { "debug_core", "debug_alloc", "debug_mutex", "debug_event", - "debug_http", "debug_mail", "debug_stream" + "debug_http", "debug_mail", "debug_stream", "debug_keylog" }; diff -r ee40e2b1d083 -r b00332a5253e src/core/ngx_log.h --- a/src/core/ngx_log.h Mon Dec 25 21:15:48 2023 +0400 +++ b/src/core/ngx_log.h Sun Jan 21 10:26:51 2024 +0000 @@ -30,6 +30,7 @@ #define NGX_LOG_DEBUG_HTTP 0x100 #define NGX_LOG_DEBUG_MAIL 0x200 #define NGX_LOG_DEBUG_STREAM 0x400 +#define NGX_LOG_DEBUG_KEYLOG 0x800 /* * do not forget to update debug_levels[] in src/core/ngx_log.c @@ -37,7 +38,7 @@ */ #define NGX_LOG_DEBUG_FIRST NGX_LOG_DEBUG_CORE -#define NGX_LOG_DEBUG_LAST NGX_LOG_DEBUG_STREAM +#define NGX_LOG_DEBUG_LAST NGX_LOG_DEBUG_KEYLOG #define NGX_LOG_DEBUG_CONNECTION 0x80000000 #define NGX_LOG_DEBUG_ALL 0x7ffffff0 diff -r ee40e2b1d083 -r b00332a5253e src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/event/ngx_event_openssl.c Sun Jan 21 10:26:51 2024 +0000 @@ -10,6 +10,11 @@ #include +#if (NGX_DEBUG && OPENSSL_VERSION_NUMBER >= 0x10101000L \ + && !defined LIBRESSL_VERSION_NUMBER) +#define NGX_SSL_KEYLOG 1 +#endif + #define NGX_SSL_PASSWORD_BUFFER_SIZE 4096 @@ -27,6 +32,9 @@ static int ngx_ssl_verify_callback(int ok, X509_STORE_CTX *x509_store); static void ngx_ssl_info_callback(const ngx_ssl_conn_t *ssl_conn, int where, int ret); +#ifdef NGX_SSL_KEYLOG +static void ngx_ssl_keylog_callback(const SSL *ssl, const char *line); +#endif static void ngx_ssl_passwords_cleanup(void *data); static int ngx_ssl_new_client_session(ngx_ssl_conn_t *ssl_conn, ngx_ssl_session_t *sess); @@ -426,10 +434,28 @@ SSL_CTX_set_info_callback(ssl->ctx, ngx_ssl_info_callback); +#ifdef NGX_SSL_KEYLOG + SSL_CTX_set_keylog_callback(ssl->ctx, ngx_ssl_keylog_callback); +#endif + return NGX_OK; } +#ifdef NGX_SSL_KEYLOG + +static void +ngx_ssl_keylog_callback(const SSL *ssl, const char *line) +{ + ngx_connection_t *c; + + c = ngx_ssl_get_connection(ssl); + ngx_log_debug(NGX_LOG_DEBUG_KEYLOG, c->log, 0, "ssl keylog: %s", line); +} + +#endif + + ngx_int_t ngx_ssl_certificates(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_array_t *certs, ngx_array_t *keys, ngx_array_t *passwords) diff -r ee40e2b1d083 -r b00332a5253e src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Sun Jan 21 10:26:51 2024 +0000 @@ -118,6 +118,8 @@ return; } + ngx_log_debug(NGX_LOG_DEBUG_KEYLOG, c->log, 0, "ssl keylog: %s", line); + p = (u_char *) line; for (start = p; *p && *p != ' '; p++); From arut at nginx.com Mon Jan 22 10:49:54 2024 From: arut at nginx.com (=?iso-8859-1?q?Roman_Arutyunyan?=) Date: Mon, 22 Jan 2024 14:49:54 +0400 Subject: [PATCH] Avoiding mixed socket families in PROXY protocol v1 (ticket #2594) Message-ID: <2f12c929527b2337c15e.1705920594@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1705916128 -14400 # Mon Jan 22 13:35:28 2024 +0400 # Node ID 2f12c929527b2337c15ef99d3a4dc97819b61fbd # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 Avoiding mixed socket families in PROXY protocol v1 (ticket #2594). When using realip module, remote and local addreses of a connection can belong to different address families. 
This previously resulted in generating PROXY
protocol headers like this:

  PROXY TCP4 127.0.0.1 unix:/tmp/nginx1.sock 55544 0

The PROXY protocol v1 specification does not allow mixed families. The change
will generate the unknown PROXY protocol header in this case:

  PROXY UNKNOWN

Also, the above mentioned format for unix socket address is not specified in
PROXY protocol v1 and is a by-product of internal nginx representation of it.
The change eliminates such addresses from  PROXY protocol headers as well.

diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c
--- a/src/core/ngx_proxy_protocol.c
+++ b/src/core/ngx_proxy_protocol.c
@@ -291,6 +291,10 @@ ngx_proxy_protocol_write(ngx_connection_
         return NULL;
     }
 
+    if (c->sockaddr->sa_family != c->local_sockaddr->sa_family) {
+        goto unknown;
+    }
+
     switch (c->sockaddr->sa_family) {
 
     case AF_INET:
@@ -304,8 +308,7 @@ ngx_proxy_protocol_write(ngx_connection_
 #endif
 
     default:
-        return ngx_cpymem(buf, "PROXY UNKNOWN" CRLF,
-                          sizeof("PROXY UNKNOWN" CRLF) - 1);
+        goto unknown;
     }
 
     buf += ngx_sock_ntop(c->sockaddr, c->socklen, buf, last - buf, 0);
@@ -319,6 +322,11 @@ ngx_proxy_protocol_write(ngx_connection_
     lport = ngx_inet_get_port(c->local_sockaddr);
 
     return ngx_slprintf(buf, last, " %ui %ui" CRLF, port, lport);
+
+unknown:
+
+    return ngx_cpymem(buf, "PROXY UNKNOWN" CRLF,
+                      sizeof("PROXY UNKNOWN" CRLF) - 1);
 }

From mdounin at mdounin.ru Mon Jan 22 11:59:21 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Mon, 22 Jan 2024 14:59:21 +0300
Subject: [PATCH] Avoiding mixed socket families in PROXY protocol v1 (ticket #2594)
In-Reply-To: <2f12c929527b2337c15e.1705920594@arut-laptop>
References: <2f12c929527b2337c15e.1705920594@arut-laptop>
Message-ID: 

Hello!

On Mon, Jan 22, 2024 at 02:49:54PM +0400, Roman Arutyunyan wrote:

> # HG changeset patch
> # User Roman Arutyunyan
> # Date 1705916128 -14400
> # Mon Jan 22 13:35:28 2024 +0400
> # Node ID 2f12c929527b2337c15ef99d3a4dc97819b61fbd
> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
> Avoiding mixed socket families in PROXY protocol v1 (ticket #2594).
> 
> When using realip module, remote and local addreses of a connection can belong
> to different address families. This previously resulted in generating PROXY
> protocol headers like this:
> 
>   PROXY TCP4 127.0.0.1 unix:/tmp/nginx1.sock 55544 0
> 
> The PROXY protocol v1 specification does not allow mixed families. The change
> will generate the unknown PROXY protocol header in this case:
> 
>   PROXY UNKNOWN
> 
> Also, the above mentioned format for unix socket address is not specified in
> PROXY protocol v1 and is a by-product of internal nginx representation of it.
> The change eliminates such addresses from  PROXY protocol headers as well.

Nitpicking: double space in "from  PROXY".

This change will essentially disable use of PROXY protocol in such
configurations. While it is probably good enough from a formal
point of view, and better than what we have now, this might still
be a surprise, especially when multiple address families are used
on the original proxy server, and the configuration works for some
of them, but not for others.

Wouldn't it be better to remember if the PROXY protocol was used
to set the address, and use $proxy_protocol_server_addr /
$proxy_protocol_server_port in this case?

Alternatively, we can use some dummy server address instead, so
the client address will still be sent.
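For the former, something along these lines could work in
ngx_proxy_protocol_write() (an untested sketch; it reuses the addresses
already parsed into c->proxy_protocol on the listening side, so both
addresses come from the same received header, and the family token
derivation is simplified):

    if (c->proxy_protocol != NULL && c->proxy_protocol->dst_addr.len) {

        /* the client address was set via PROXY protocol; pass through
         * the server address received in the same header */

        return ngx_slprintf(buf, last, "PROXY %s %V %V %ui %ui" CRLF,
                            c->sockaddr->sa_family == AF_INET6
                                                      ? "TCP6" : "TCP4",
                            &c->proxy_protocol->src_addr,
                            &c->proxy_protocol->dst_addr,
                            (ngx_uint_t) c->proxy_protocol->src_port,
                            (ngx_uint_t) c->proxy_protocol->dst_port);
    }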
-- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Mon Jan 22 15:48:01 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 22 Jan 2024 19:48:01 +0400 Subject: [PATCH] Avoiding mixed socket families in PROXY protocol v1 (ticket #2594) In-Reply-To: References: <2f12c929527b2337c15e.1705920594@arut-laptop> Message-ID: <20240122154801.ycda4ie442ipzw6n@N00W24XTQX> Hi, On Mon, Jan 22, 2024 at 02:59:21PM +0300, Maxim Dounin wrote: > Hello! > > On Mon, Jan 22, 2024 at 02:49:54PM +0400, Roman Arutyunyan wrote: > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1705916128 -14400 > > # Mon Jan 22 13:35:28 2024 +0400 > > # Node ID 2f12c929527b2337c15ef99d3a4dc97819b61fbd > > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > > Avoiding mixed socket families in PROXY protocol v1 (ticket #2594). > > > > When using realip module, remote and local addreses of a connection can belong > > to different address families. This previously resulted in generating PROXY > > protocol headers like this: > > > > PROXY TCP4 127.0.0.1 unix:/tmp/nginx1.sock 55544 0 > > > > The PROXY protocol v1 specification does not allow mixed families. The change > > will generate the unknown PROXY protocol header in this case: > > > > PROXY UNKNOWN > > > > Also, the above mentioned format for unix socket address is not specified in > > PROXY protocol v1 and is a by-product of internal nginx representation of it. > > The change eliminates such addresses from PROXY protocol headers as well. > > Nitpicking: double space in "from PROXY". Yes, thanks. > This change will essentially disable use of PROXY protocol in such > configurations. While it is probably good enough from formal > point of view, and better that what we have now, this might still > be a surprise, especially when multiple address families are used > on the original proxy server, and the configuration works for some > of them, but not for others. > > Wouldn't it be better to remember if the PROXY protocol was used > to set the address, and use $proxy_protocol_server_addr / > $proxy_protocol_server_port in this case? > > Alternatively, we can use some dummy server address instead, so > the client address will be still sent. Another alternative is duplicating client address in this case, see patch. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1705938401 -14400 # Mon Jan 22 19:46:41 2024 +0400 # Node ID 89ac89209d927b8a438780434a17a0677ef3bf4e # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 Avoiding mixed socket families in PROXY protocol v1 (ticket #2594). When using realip module, remote and local addresses of a connection can belong to different address families. This previously resulted in generating PROXY protocol headers like this: PROXY TCP4 127.0.0.1 unix:/tmp/nginx1.sock 55544 0 The PROXY protocol v1 specification does not allow mixed families. The change substitutes server address with client address in this case: PROXY TCP4 127.0.0.1 127.0.0.1 55544 55544 As an alternative, "PROXY UNKNOWN" header could be used, which unlike this header does not contain any useful information about the client. Also, the above mentioned format for unix socket address is not specified in PROXY protocol v1 and is a by-product of internal nginx representation of it. The change eliminates such addresses from PROXY protocol headers as well. 
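For reference, a minimal configuration reproducing the mixed-family
header looks roughly like this (a sketch; any setup where the realip
module replaces the client address of a unix socket connection will do):

    stream {
        server {
            listen unix:/tmp/nginx1.sock proxy_protocol;
            set_real_ip_from unix:;

            proxy_pass 127.0.0.1:8080;
            proxy_protocol on;
        }
    }

Here the remote address is rewritten to the TCP4 address from the
incoming PROXY protocol header, while the local address remains AF_UNIX.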
diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c +++ b/src/core/ngx_proxy_protocol.c @@ -279,7 +279,9 @@ ngx_proxy_protocol_read_port(u_char *p, u_char * ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, u_char *last) { - ngx_uint_t port, lport; + ngx_uint_t port, lport; + socklen_t local_socklen; + struct sockaddr *local_sockaddr; if (last - buf < NGX_PROXY_PROTOCOL_V1_MAX_HEADER) { ngx_log_error(NGX_LOG_ALERT, c->log, 0, @@ -312,11 +314,19 @@ ngx_proxy_protocol_write(ngx_connection_ *buf++ = ' '; - buf += ngx_sock_ntop(c->local_sockaddr, c->local_socklen, buf, last - buf, - 0); + if (c->sockaddr->sa_family == c->local_sockaddr->sa_family) { + local_sockaddr = c->local_sockaddr; + local_socklen = c->local_socklen; + + } else { + local_sockaddr = c->sockaddr; + local_socklen = c->socklen; + } + + buf += ngx_sock_ntop(local_sockaddr, local_socklen, buf, last - buf, 0); port = ngx_inet_get_port(c->sockaddr); - lport = ngx_inet_get_port(c->local_sockaddr); + lport = ngx_inet_get_port(local_sockaddr); return ngx_slprintf(buf, last, " %ui %ui" CRLF, port, lport); } From yar at nginx.com Tue Jan 23 12:56:47 2024 From: yar at nginx.com (=?utf-8?q?Yaroslav_Zhuravlev?=) Date: Tue, 23 Jan 2024 12:56:47 +0000 Subject: [PATCH] Year 2024 Message-ID: <9fa58eaa9b8fd4dcd967.1706014607@ORK-ML-00007151> GNUmakefile | 2 +- text/LICENSE | 2 +- xml/menu.xml | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.org.patch Type: text/x-patch Size: 1093 bytes Desc: not available URL: From yar at nginx.com Tue Jan 23 13:49:07 2024 From: yar at nginx.com (=?utf-8?q?Yaroslav_Zhuravlev?=) Date: Tue, 23 Jan 2024 13:49:07 +0000 Subject: [PATCH] Documented opensourcing of the OTel module Message-ID: <00807e94be3622a79d77.1706017747@ORK-ML-00007151> xml/en/docs/index.xml | 8 +++++++- xml/en/docs/ngx_otel_module.xml | 20 +++++++++++++++----- xml/ru/docs/index.xml | 10 ++++++++-- 3 files changed, 30 insertions(+), 8 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.org.patch Type: text/x-patch Size: 2813 bytes Desc: not available URL: From mdounin at mdounin.ru Tue Jan 23 21:03:06 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Jan 2024 00:03:06 +0300 Subject: [PATCH] Avoiding mixed socket families in PROXY protocol v1 (ticket #2594) In-Reply-To: <20240122154801.ycda4ie442ipzw6n@N00W24XTQX> References: <2f12c929527b2337c15e.1705920594@arut-laptop> <20240122154801.ycda4ie442ipzw6n@N00W24XTQX> Message-ID: Hello! On Mon, Jan 22, 2024 at 07:48:01PM +0400, Roman Arutyunyan wrote: > Hi, > > On Mon, Jan 22, 2024 at 02:59:21PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Mon, Jan 22, 2024 at 02:49:54PM +0400, Roman Arutyunyan wrote: > > > > > # HG changeset patch > > > # User Roman Arutyunyan > > > # Date 1705916128 -14400 > > > # Mon Jan 22 13:35:28 2024 +0400 > > > # Node ID 2f12c929527b2337c15ef99d3a4dc97819b61fbd > > > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > > > Avoiding mixed socket families in PROXY protocol v1 (ticket #2594). Also nitpicking: ticket #2010 might be a better choice. The #2594 is actually a duplicate (with a side issue noted that using long unix socket path might result in a PROXY protocol header without ports and CRLF) and should be closed as such. 
> > > 
> > > When using realip module, remote and local addreses of a connection can belong
> > > to different address families. This previously resulted in generating PROXY
> > > protocol headers like this:
> > > 
> > >   PROXY TCP4 127.0.0.1 unix:/tmp/nginx1.sock 55544 0
> > > 
> > > The PROXY protocol v1 specification does not allow mixed families. The change
> > > will generate the unknown PROXY protocol header in this case:
> > > 
> > >   PROXY UNKNOWN
> > > 
> > > Also, the above mentioned format for unix socket address is not specified in
> > > PROXY protocol v1 and is a by-product of internal nginx representation of it.
> > > The change eliminates such addresses from  PROXY protocol headers as well.
> > 
> > Nitpicking: double space in "from  PROXY".
> 
> Yes, thanks.
> 
> > This change will essentially disable use of PROXY protocol in such
> > configurations. While it is probably good enough from a formal
> > point of view, and better than what we have now, this might still
> > be a surprise, especially when multiple address families are used
> > on the original proxy server, and the configuration works for some
> > of them, but not for others.
> > 
> > Wouldn't it be better to remember if the PROXY protocol was used
> > to set the address, and use $proxy_protocol_server_addr /
> > $proxy_protocol_server_port in this case?
> > 
> > Alternatively, we can use some dummy server address instead, so
> > the client address will still be sent.
> 
> Another alternative is duplicating client address in this case, see patch.

I don't think it is a good idea. Using some meaningful real address
might easily mislead users. I would rather use a clearly dummy
address instead, such as INADDR_ANY with port 0.

Also, as suggested, using the server address as obtained via PROXY
protocol from the client might be a better solution as long as the
client address was set via PROXY protocol (regardless of whether
address families match or not), and what users expect from the
"proxy_protocol on;" when chaining stream proxies in the first place.

[...]
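For illustration, with the local_sockaddr/local_socklen locals from
your patch, the dummy address variant might look like this (an untested
sketch, AF_INET branch only; AF_INET6 would use in6addr_any similarly):

    struct sockaddr_in   sin;

    if (c->sockaddr->sa_family != c->local_sockaddr->sa_family) {

        /* a clearly dummy local address: 0.0.0.0, port 0 */

        ngx_memzero(&sin, sizeof(struct sockaddr_in));
        sin.sin_family = AF_INET;

        local_sockaddr = (struct sockaddr *) &sin;
        local_socklen = sizeof(struct sockaddr_in);

    } else {
        local_sockaddr = c->local_sockaddr;
        local_socklen = c->local_socklen;
    }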
From benjamin.p.kallus.gr at dartmouth.edu Wed Jan 24 00:09:02 2024 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Wed, 24 Jan 2024 00:09:02 +0000 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: Hi Maxim, > As already pointed out previously, there are no known cases > when memcpy(p, NULL, 0) can result in miscompilation of nginx > code, ... If you think there are cases when the code can be > miscompiled in practice, and not theoretically, please share. There is no such thing as "miscompilation" of code that executes undefined behavior. The behavior is undefined; literally any instructions that the compiler emits is correct compilation. This is the definition of undefined behavior. You want me to cite a line in nginx that you would consider "miscompiled in practice." I'm not going to spend hours combing through assembly to convince you that undefined behavior is worth avoiding. Sorry! > as nginx usually does not checks string data pointers > against NULL (and instead checks length, if needed). In > particular, ngx_pstrdup() you are trying to patch doesn't. That > is, this is exactly the "no direct impact" situation as assumed > above. It is non-obvious that checks against NULL will be optimized away after calls to ngx_memcpy. Whether a function even calls ngx_memcpy on a given pointer may not always be obvious, especially if that call happens many layers down the stack. The argument "but we just don't do checks against NULL after calling memcpy" (much like the argument "but we just don't pass macro arguments that would violate operator precedence or cause side effects to be reevaluated") is a bad one because it requires all contributors to follow a set of undocumented rules which, when broken, can have serious consequences. Futher, the rules will change in the future as new compiler optimizations are developed and enabled by default. For now, NULL checks are to be avoided after calling memcpy on GCC -O2. In the future, it's plausible that they may need to be avoided before calling memcpy as well, and on clang -O1. Compiler authors do not all care about the fact that you believe that the standard is wrong. Their job is to make compliant programs run correctly and fast; they have no similar obligation for programs that execute UB. Most other C projects accept that avoiding all UB is necessary, even when somewhat inconvenient. Search "memcpy null" in the commit history of your C or C++ project of choice and you can see this for yourself. Every project I tried (OpenSSL, Apache httpd, FFmpeg, curl, WebKit, CPython) has at least 2 commits that work around memcpy from NULL, usually by adding a length check. As for what is the nicest way to avoid NULL memcpy, that is a matter of taste. I personally think that it is needless to add an extra branch to every memcpy, even when that memcpy's arguments are known to be nonnull. I therefore advocate for a piecemeal approach, which also seems to be the more common one. Patching ngx_memcpy, as you suggest, is also a valid solution to the issue, and which is better is a matter of opinion on which I don't have strong feelings. 
-Ben

(
There is a proposal for C2y to define memcpy(NULL,NULL,0):
https://docs.google.com/document/d/1guH_HgibKrX7t9JfKGfWX2UCPyZOTLsnRfR6UleD1F8/edit
If you feel strongly that memcpy from NULL should be defined, feel
free to contribute to it :)
)

From pluknet at nginx.com Wed Jan 24 00:19:59 2024
From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=)
Date: Wed, 24 Jan 2024 04:19:59 +0400
Subject: [PATCH] Events: protection from stale changes to level-triggered kevents
Message-ID: 

# HG changeset patch
# User Sergey Kandaurov 
# Date 1706055243 -14400
# Wed Jan 24 04:14:03 2024 +0400
# Node ID d47ed07b06e93f4c6137ccd4ddfce0de23afb6a2
# Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07
Events: protection from stale changes to level-triggered kevents.

When kqueue events are reported in level-triggered mode, without EV_CLEAR
set, it was previously possible to try to remove a kevent attached to a
closed file descriptor. Calling close() on a file descriptor removes any
kevents that reference the descriptor; it is an error to remove such
kevents afterwards.

In FreeBSD, this results in a kevent reported with EV_ERROR set in flags
and EBADF in data, which corresponds to operating on an invalid file
descriptor. MacOS behaves similarly; the difference is that it uses a
distinct error path for no knote found and EV_ADD unspecified, and
returns EV_ERROR with ENOENT. Either way, this triggers "kevent() error"
alerts.

In practice, this may happen as part of handling a read event after the
main loop in ngx_event_pipe(), which then initiates closing the
connection, as caught by proxy_chunked_extra.t. Another use-case common
on SSL connections is handling a read event after SSL handshaking is
finished, which results in a kevent removal change. It may then happen
that the request is fully read and processed in the same cycle iteration,
closing the connection with the pending kevent removal. A variation of
this use-case is to re-add the event after SSL handshaking to read SSL
payload, e.g. as part of the application protocol greeting, then receive
EPIPE from a subsequent SSL_write() and remove the event again on
connection close. Normally this would result in three change list
elements appended: EV_DELETE, EV_ADD, EV_DELETE. The check in
ngx_kqueue_del_event() instead annihilates a previously appended EV_ADD
change, leaving the first remove change, which reduces to the previous
use-case. Caught by mail_ssl_session_reuse.t.

The fix is to check in ngx_kqueue_process_events() if we operate over a
just closed file descriptor in this iteration, as it may happen after the
change list was updated, and prune such kevent changes before entering
kevent().

Another use-case that makes the fix incomplete is processing further
events in the same iteration that may reuse a just closed file descriptor
and clear ev->closed while initializing a reused event, rendering the
check useless. This may happen e.g. as part of accepting a new
connection. The fix is to check in ngx_kqueue_add_event() if there are
invalidated events in the change list matching the one being added and
prune the corresponding kevent changes.
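For readers of the patch: nginx stores the ngx_event_t pointer in the
kevent udata field with the pointer's low bit reused as the instance
flag, so the new loops below recover the event with

    e = (ngx_event_t *) ((uintptr_t) change_list[i].udata & (uintptr_t) ~1);

which is the masking they rely on to match change list entries against
events.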
diff --git a/src/event/modules/ngx_kqueue_module.c b/src/event/modules/ngx_kqueue_module.c --- a/src/event/modules/ngx_kqueue_module.c +++ b/src/event/modules/ngx_kqueue_module.c @@ -283,8 +283,11 @@ static ngx_int_t ngx_kqueue_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags) { ngx_int_t rc; +#if !(NGX_HAVE_CLEAR_EVENT) + ngx_uint_t i; + ngx_event_t *e; +#endif #if 0 - ngx_event_t *e; ngx_connection_t *c; #endif @@ -329,6 +332,36 @@ ngx_kqueue_add_event(ngx_event_t *ev, ng #endif +#if !(NGX_HAVE_CLEAR_EVENT) + + for (i = 0; i < nchanges; i++) { + if (ev->index == NGX_INVALID_INDEX + && ((uintptr_t) change_list[i].udata & (uintptr_t) ~1) + == (uintptr_t) ev) + { + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "kevent stale: %d: ft:%d fl:%04Xd", + (int) change_list[i].ident, change_list[i].filter, + change_list[i].flags); + + /* + * the stale event from a file descriptor + * that was just closed and reused in this iteration + */ + + if (i < --nchanges) { + e = (ngx_event_t *) + ((uintptr_t) change_list[nchanges].udata & (uintptr_t) ~1); + change_list[i] = change_list[nchanges]; + e->index = i; + + i--; + } + } + } + +#endif + rc = ngx_kqueue_set_event(ev, event, EV_ADD|EV_ENABLE|flags); return rc; @@ -503,6 +536,9 @@ ngx_kqueue_process_events(ngx_cycle_t *c ngx_uint_t level; ngx_err_t err; ngx_event_t *ev; +#if !(NGX_HAVE_CLEAR_EVENT) + ngx_event_t *e; +#endif ngx_queue_t *queue; struct timespec ts, *tp; @@ -530,6 +566,36 @@ ngx_kqueue_process_events(ngx_cycle_t *c tp = &ts; } +#if !(NGX_HAVE_CLEAR_EVENT) + + for (i = 0; i < n; i++) { + ev = (ngx_event_t *) + ((uintptr_t) change_list[i].udata & (uintptr_t) ~1); + + if (ev->closed) { + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0, + "kevent closed: %d: ft:%d fl:%04Xd", + (int) change_list[i].ident, change_list[i].filter, + change_list[i].flags); + + /* + * the stale event change for a file descriptor + * that was just closed in this iteration + */ + + if (i < --n) { + e = (ngx_event_t *) + ((uintptr_t) change_list[n].udata & (uintptr_t) ~1); + change_list[i] = change_list[n]; + e->index = i; + + i--; + } + } + } + +#endif + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "kevent timer: %M, changes: %d", timer, n); From xeioex at nginx.com Wed Jan 24 00:40:15 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 24 Jan 2024 00:40:15 +0000 Subject: [njs] Shell: fixed unhandled rejected promises handling. Message-ID: details: https://hg.nginx.org/njs/rev/6485ad23565e branches: changeset: 2270:6485ad23565e user: Dmitry Volyntsev date: Tue Jan 23 16:33:29 2024 -0800 description: Shell: fixed unhandled rejected promises handling. The issue was introduced in dffdf7c50dfc (not released yet). 
diffstat: external/njs_shell.c | 10 +++++----- test/js/promise_reject_post_catch.t.js | 9 --------- test/shell_test.exp | 9 +++++++++ 3 files changed, 14 insertions(+), 14 deletions(-) diffs (59 lines): diff -r 8aad26845b18 -r 6485ad23565e external/njs_shell.c --- a/external/njs_shell.c Thu Jan 18 18:03:35 2024 -0800 +++ b/external/njs_shell.c Tue Jan 23 16:33:29 2024 -0800 @@ -1207,6 +1207,11 @@ njs_process_script(njs_vm_t *vm, void *r } } + ret = njs_process_events(runtime); + if (njs_slow_path(ret == NJS_ERROR)) { + break; + } + if (njs_unhandled_rejection(runtime)) { njs_process_output(vm, NULL, NJS_ERROR); @@ -1215,11 +1220,6 @@ njs_process_script(njs_vm_t *vm, void *r } } - ret = njs_process_events(runtime); - if (njs_slow_path(ret == NJS_ERROR)) { - break; - } - if (ret == NJS_OK) { break; } diff -r 8aad26845b18 -r 6485ad23565e test/js/promise_reject_post_catch.t.js --- a/test/js/promise_reject_post_catch.t.js Thu Jan 18 18:03:35 2024 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,9 +0,0 @@ -/*--- -includes: [] -flags: [] -negative: - phase: runtime ----*/ - -var p = Promise.reject(); -setImmediate(() => {p.catch(() => {})}); diff -r 8aad26845b18 -r 6485ad23565e test/shell_test.exp --- a/test/shell_test.exp Thu Jan 18 18:03:35 2024 -0800 +++ b/test/shell_test.exp Tue Jan 23 16:33:29 2024 -0800 @@ -476,6 +476,15 @@ ReferenceError: \"ref\" is not defined at anonymous \\\(string:1\\\) at main \\\(string:1\\\)\n$" +njs_test { + {"setImmediate(() => { console.log('x'); return Promise.reject('xx'); })\r\n" + "0\r\nx\r\nThrown:\r\nError: unhandled promise rejection: xx\r\n"} + {"setImmediate(() => { console.log('x'); return Promise.reject('xx'); })\r\n" + "1\r\nx\r\nThrown:\r\nError: unhandled promise rejection: xx\r\n"} + {"42\r\n" + "42\r\n"} +} + # CLI OPTIONS # help From xeioex at nginx.com Wed Jan 24 00:40:17 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 24 Jan 2024 00:40:17 +0000 Subject: [njs] Change: imported modules are not resolved relative to current dir. Message-ID: details: https://hg.nginx.org/njs/rev/7eaaa7d57636 branches: changeset: 2271:7eaaa7d57636 user: Dmitry Volyntsev date: Tue Jan 23 16:33:52 2024 -0800 description: Change: imported modules are not resolved relative to current dir. Previously, when a module was imported with a relative path it was looked for first in the directory of the importing context (global, or a module). For example when: main.js: import libs/lib1.js; libs/lib1.js: import lib2.js; lib2.js was looked for first in libs/. Now, it is only looked for in directories speficied with js_path and a directory of nginx configuration file. 
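For configurations that relied on the old lookup order, the module
directory now has to be listed explicitly, for example (a sketch,
assuming the layout from the description above with libs/ under the
configuration prefix):

    js_path libs;

so that lib2.js imported from libs/lib1.js keeps resolving.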
diffstat: src/njs_module.c | 24 ++++++------------------ test/js/import_chain.t.js | 2 +- test/js/import_relative_path.t.js | 10 ---------- test/shell_test.exp | 4 ++-- 4 files changed, 9 insertions(+), 31 deletions(-) diffs (120 lines): diff -r 6485ad23565e -r 7eaaa7d57636 src/njs_module.c --- a/src/njs_module.c Tue Jan 23 16:33:29 2024 -0800 +++ b/src/njs_module.c Tue Jan 23 16:33:52 2024 -0800 @@ -16,8 +16,7 @@ typedef struct { } njs_module_info_t; -static njs_int_t njs_module_lookup(njs_vm_t *vm, const njs_str_t *cwd, - njs_module_info_t *info); +static njs_int_t njs_module_lookup(njs_vm_t *vm, njs_module_info_t *info); static njs_int_t njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info); static njs_int_t njs_module_read(njs_vm_t *vm, int fd, njs_str_t *body); @@ -45,7 +44,7 @@ njs_parser_module(njs_parser_t *parser, goto done; } - external = parser; + external = NULL; loader = njs_default_module_loader; if (vm->module_loader != NULL) { @@ -70,7 +69,7 @@ done: static njs_int_t -njs_module_lookup(njs_vm_t *vm, const njs_str_t *cwd, njs_module_info_t *info) +njs_module_lookup(njs_vm_t *vm, njs_module_info_t *info) { njs_int_t ret; njs_str_t *path; @@ -80,12 +79,6 @@ njs_module_lookup(njs_vm_t *vm, const nj return njs_module_path(vm, NULL, info); } - ret = njs_module_path(vm, cwd, info); - - if (ret != NJS_DECLINED) { - return ret; - } - if (vm->paths == NULL) { return NJS_DECLINED; } @@ -158,7 +151,6 @@ njs_module_path(njs_vm_t *vm, const njs_ return NJS_DECLINED; } - info->file.start = (u_char *) &info->path[0]; info->file.length = njs_strlen(info->file.start); @@ -359,24 +351,20 @@ njs_module_require(njs_vm_t *vm, njs_val static njs_mod_t * -njs_default_module_loader(njs_vm_t *vm, njs_external_ptr_t external, +njs_default_module_loader(njs_vm_t *vm, njs_external_ptr_t unused, njs_str_t *name) { u_char *start; njs_int_t ret; - njs_str_t cwd, text; + njs_str_t text; njs_mod_t *module; - njs_parser_t *prev; njs_module_info_t info; - prev = external; - njs_memzero(&info, sizeof(njs_module_info_t)); info.name = *name; - njs_file_dirname(&prev->lexer->file, &cwd); - ret = njs_module_lookup(vm, &cwd, &info); + ret = njs_module_lookup(vm, &info); if (njs_slow_path(ret != NJS_OK)) { return NULL; } diff -r 6485ad23565e -r 7eaaa7d57636 test/js/import_chain.t.js --- a/test/js/import_chain.t.js Tue Jan 23 16:33:29 2024 -0800 +++ b/test/js/import_chain.t.js Tue Jan 23 16:33:52 2024 -0800 @@ -1,7 +1,7 @@ /*--- includes: [] flags: [] -paths: [test/js/module/, test/js/module/libs/] +paths: [test/js/module/, test/js/module/libs/, test/js/module/sub] ---*/ import lib2 from 'lib2.js'; diff -r 6485ad23565e -r 7eaaa7d57636 test/js/import_relative_path.t.js --- a/test/js/import_relative_path.t.js Tue Jan 23 16:33:29 2024 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,10 +0,0 @@ -/*--- -includes: [] -flags: [] -paths: [test/js/module/] ----*/ - -import name from 'name.js'; -import hash from 'libs/hash.js'; - -assert.sameValue(hash.name, "libs.name"); diff -r 6485ad23565e -r 7eaaa7d57636 test/shell_test.exp --- a/test/shell_test.exp Tue Jan 23 16:33:29 2024 -0800 +++ b/test/shell_test.exp Tue Jan 23 16:33:52 2024 -0800 @@ -563,8 +563,8 @@ njs_test { # quiet mode -njs_run {"-q" "test/js/import_relative_path.t.js"} \ - "SyntaxError: Cannot find module \"name.js\" in 7" +njs_run {"-q" "test/js/import_chain.t.js"} \ + "SyntaxError: Cannot find module \"lib2.js\" in 7" # sandboxing From xeioex at nginx.com Wed Jan 24 00:40:18 2024 From: xeioex at nginx.com 
(=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 24 Jan 2024 00:40:18 +0000 Subject: [njs] Moving out HostLoadImportedModule from njs core. Message-ID: details: https://hg.nginx.org/njs/rev/9b3dac56fd8a branches: changeset: 2272:9b3dac56fd8a user: Dmitry Volyntsev date: Tue Jan 23 16:34:10 2024 -0800 description: Moving out HostLoadImportedModule from njs core. HostLoadImportedModule should be implemented by host environment according to ECMAScript specs. The following method was removed: njs_vm_add_path(). diffstat: external/njs_shell.c | 230 +++++++++++++++++++++++++++++++++++++++++++++----- nginx/ngx_js.c | 211 ++++++++++++++++++++++++++++++++++++++++++---- src/njs.h | 2 - src/njs_module.c | 230 --------------------------------------------------- src/njs_module.h | 1 - src/njs_parser.c | 39 ++++++++ src/njs_vm.c | 23 ----- src/njs_vm.h | 1 - 8 files changed, 438 insertions(+), 299 deletions(-) diffs (915 lines): diff -r 7eaaa7d57636 -r 9b3dac56fd8a external/njs_shell.c --- a/external/njs_shell.c Tue Jan 23 16:33:52 2024 -0800 +++ b/external/njs_shell.c Tue Jan 23 16:34:10 2024 -0800 @@ -47,7 +47,7 @@ typedef struct { char *file; njs_str_t command; size_t n_paths; - char **paths; + njs_str_t *paths; char **argv; njs_uint_t argc; } njs_opts_t; @@ -99,6 +99,14 @@ typedef struct { typedef struct { + int fd; + njs_str_t name; + njs_str_t file; + char path[NJS_MAX_PATH + 1]; +} njs_module_info_t; + + +typedef struct { njs_vm_t *vm; uint32_t event_id; @@ -129,6 +137,8 @@ static njs_int_t njs_process_script(njs_ #ifndef NJS_FUZZER_TARGET static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char **argv); +static njs_int_t njs_options_add_path(njs_opts_t *opts, u_char *path, + size_t len); static void njs_options_free(njs_opts_t *opts); #ifdef NJS_HAVE_READLINE @@ -390,7 +400,7 @@ done: static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char **argv) { - char *p, **paths; + char *p; njs_int_t i, ret; njs_uint_t n; @@ -516,15 +526,13 @@ njs_options_parse(njs_opts_t *opts, int case 'p': if (++i < argc) { - opts->n_paths++; - paths = realloc(opts->paths, opts->n_paths * sizeof(char *)); - if (paths == NULL) { + ret = njs_options_add_path(opts, (u_char *) argv[i], + njs_strlen(argv[i])); + if (ret != NJS_OK) { njs_stderror("failed to add path\n"); return NJS_ERROR; } - opts->paths = paths; - opts->paths[opts->n_paths - 1] = argv[i]; break; } @@ -595,6 +603,27 @@ done: } +static njs_int_t +njs_options_add_path(njs_opts_t *opts, u_char *path, size_t len) +{ + njs_str_t *paths; + + opts->n_paths++; + + paths = realloc(opts->paths, opts->n_paths * sizeof(njs_str_t)); + if (paths == NULL) { + njs_stderror("failed to add path\n"); + return NJS_ERROR; + } + + opts->paths = paths; + opts->paths[opts->n_paths - 1].start = path; + opts->paths[opts->n_paths - 1].length = len; + + return NJS_OK; +} + + static void njs_options_free(njs_opts_t *opts) { @@ -806,14 +835,179 @@ njs_rejection_tracker(njs_vm_t *vm, njs_ } +static njs_int_t +njs_module_path(const njs_str_t *dir, njs_module_info_t *info) +{ + char *p; + size_t length; + njs_bool_t trail; + char src[NJS_MAX_PATH + 1]; + + trail = 0; + length = info->name.length; + + if (dir != NULL) { + length += dir->length; + + if (length == 0) { + return NJS_DECLINED; + } + + trail = (dir->start[dir->length - 1] != '/'); + + if (trail) { + length++; + } + } + + if (njs_slow_path(length > NJS_MAX_PATH)) { + return NJS_ERROR; + } + + p = &src[0]; + + if (dir != NULL) { + p = (char *) njs_cpymem(p, dir->start, dir->length); + + if (trail) { + *p++ = '/'; + } 
+ } + + p = (char *) njs_cpymem(p, info->name.start, info->name.length); + *p = '\0'; + + p = realpath(&src[0], &info->path[0]); + if (p == NULL) { + return NJS_DECLINED; + } + + info->fd = open(&info->path[0], O_RDONLY); + if (info->fd < 0) { + return NJS_DECLINED; + } + + info->file.start = (u_char *) &info->path[0]; + info->file.length = njs_strlen(info->file.start); + + return NJS_OK; +} + + +static njs_int_t +njs_module_lookup(njs_opts_t *opts, njs_module_info_t *info) +{ + njs_int_t ret; + njs_str_t *path; + njs_uint_t i; + + if (info->name.start[0] == '/') { + return njs_module_path(NULL, info); + } + + path = opts->paths; + + for (i = 0; i < opts->n_paths; i++) { + ret = njs_module_path(&path[i], info); + + if (ret != NJS_DECLINED) { + return ret; + } + } + + return NJS_DECLINED; +} + + +static njs_int_t +njs_module_read(njs_mp_t *mp, int fd, njs_str_t *text) +{ + ssize_t n; + struct stat sb; + + text->start = NULL; + + if (fstat(fd, &sb) == -1) { + goto fail; + } + + if (!S_ISREG(sb.st_mode)) { + goto fail; + } + + text->length = sb.st_size; + + text->start = njs_mp_alloc(mp, text->length); + if (text->start == NULL) { + goto fail; + } + + n = read(fd, text->start, sb.st_size); + + if (n < 0 || n != sb.st_size) { + goto fail; + } + + return NJS_OK; + +fail: + + if (text->start != NULL) { + njs_mp_free(mp, text->start); + } + + return NJS_ERROR; +} + + +static njs_mod_t * +njs_module_loader(njs_vm_t *vm, njs_external_ptr_t external, njs_str_t *name) +{ + u_char *start; + njs_int_t ret; + njs_str_t text; + njs_mod_t *module; + njs_opts_t *opts; + njs_module_info_t info; + + opts = external; + + njs_memzero(&info, sizeof(njs_module_info_t)); + + info.name = *name; + + ret = njs_module_lookup(opts, &info); + if (njs_slow_path(ret != NJS_OK)) { + return NULL; + } + + ret = njs_module_read(njs_vm_memory_pool(vm), info.fd, &text); + + (void) close(info.fd); + + if (njs_slow_path(ret != NJS_OK)) { + njs_vm_internal_error(vm, "while reading \"%V\" module", &info.file); + return NULL; + } + + start = text.start; + + module = njs_vm_compile_module(vm, &info.file, &start, + &text.start[text.length]); + + njs_mp_free(njs_vm_memory_pool(vm), text.start); + + return module; +} + + static njs_vm_t * njs_create_vm(njs_opts_t *opts) { + size_t len; u_char *p, *start; njs_vm_t *vm; njs_int_t ret; - njs_str_t path; - njs_uint_t i; njs_vm_opt_t vm_options; njs_vm_opt_init(&vm_options); @@ -857,16 +1051,7 @@ njs_create_vm(njs_opts_t *opts) njs_vm_external_ptr(vm)); } - for (i = 0; i < opts->n_paths; i++) { - path.start = (u_char *) opts->paths[i]; - path.length = njs_strlen(opts->paths[i]); - - ret = njs_vm_add_path(vm, &path); - if (ret != NJS_OK) { - njs_stderror("failed to add path\n"); - return NULL; - } - } + njs_vm_set_module_loader(vm, njs_module_loader, opts); start = (u_char *) getenv("NJS_PATH"); if (start == NULL) { @@ -876,10 +1061,9 @@ njs_create_vm(njs_opts_t *opts) for ( ;; ) { p = njs_strchr(start, ':'); - path.start = start; - path.length = (p != NULL) ? (size_t) (p - start) : njs_strlen(start); - - ret = njs_vm_add_path(vm, &path); + len = (p != NULL) ? 
(size_t) (p - start) : njs_strlen(start); + + ret = njs_options_add_path(opts, start, len); if (ret != NJS_OK) { njs_stderror("failed to add path\n"); return NULL; diff -r 7eaaa7d57636 -r 9b3dac56fd8a nginx/ngx_js.c --- a/nginx/ngx_js.c Tue Jan 23 16:33:52 2024 -0800 +++ b/nginx/ngx_js.c Tue Jan 23 16:34:10 2024 -0800 @@ -29,6 +29,20 @@ typedef struct { } ngx_js_rejected_promise_t; +#if defined(PATH_MAX) +#define NGX_MAX_PATH PATH_MAX +#else +#define NGX_MAX_PATH 4096 +#endif + +typedef struct { + int fd; + njs_str_t name; + njs_str_t file; + char path[NGX_MAX_PATH + 1]; +} njs_module_info_t; + + static njs_int_t ngx_js_ext_build(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); static njs_int_t ngx_js_ext_conf_file_path(njs_vm_t *vm, @@ -1715,6 +1729,182 @@ ngx_js_rejection_tracker(njs_vm_t *vm, n } +static njs_int_t +ngx_js_module_path(const ngx_str_t *dir, njs_module_info_t *info) +{ + char *p; + size_t length; + njs_bool_t trail; + char src[NGX_MAX_PATH + 1]; + + trail = 0; + length = info->name.length; + + if (dir != NULL) { + length += dir->len; + + if (length == 0) { + return NJS_DECLINED; + } + + trail = (dir->data[dir->len - 1] != '/'); + + if (trail) { + length++; + } + } + + if (njs_slow_path(length > NGX_MAX_PATH)) { + return NJS_ERROR; + } + + p = &src[0]; + + if (dir != NULL) { + p = (char *) njs_cpymem(p, dir->data, dir->len); + + if (trail) { + *p++ = '/'; + } + } + + p = (char *) njs_cpymem(p, info->name.start, info->name.length); + *p = '\0'; + + p = realpath(&src[0], &info->path[0]); + if (p == NULL) { + return NJS_DECLINED; + } + + info->fd = open(&info->path[0], O_RDONLY); + if (info->fd < 0) { + return NJS_DECLINED; + } + + info->file.start = (u_char *) &info->path[0]; + info->file.length = njs_strlen(info->file.start); + + return NJS_OK; +} + + +static njs_int_t +ngx_js_module_lookup(ngx_js_loc_conf_t *conf, njs_module_info_t *info) +{ + njs_int_t ret; + ngx_str_t *path; + njs_uint_t i; + + if (info->name.start[0] == '/') { + return ngx_js_module_path(NULL, info); + } + + ret = ngx_js_module_path((const ngx_str_t *) &ngx_cycle->conf_prefix, info); + + if (ret != NJS_DECLINED) { + return ret; + } + + if (conf->paths == NGX_CONF_UNSET_PTR) { + return NJS_DECLINED; + } + + path = conf->paths->elts; + + for (i = 0; i < conf->paths->nelts; i++) { + ret = ngx_js_module_path(&path[i], info); + + if (ret != NJS_DECLINED) { + return ret; + } + } + + return NJS_DECLINED; +} + + +static njs_int_t +ngx_js_module_read(njs_mp_t *mp, int fd, njs_str_t *text) +{ + ssize_t n; + struct stat sb; + + text->start = NULL; + + if (fstat(fd, &sb) == -1) { + goto fail; + } + + if (!S_ISREG(sb.st_mode)) { + goto fail; + } + + text->length = sb.st_size; + + text->start = njs_mp_alloc(mp, text->length); + if (text->start == NULL) { + goto fail; + } + + n = read(fd, text->start, sb.st_size); + + if (n < 0 || n != sb.st_size) { + goto fail; + } + + return NJS_OK; + +fail: + + if (text->start != NULL) { + njs_mp_free(mp, text->start); + } + + return NJS_ERROR; +} + + +static njs_mod_t * +ngx_js_module_loader(njs_vm_t *vm, njs_external_ptr_t external, njs_str_t *name) +{ + u_char *start; + njs_int_t ret; + njs_str_t text; + njs_mod_t *module; + ngx_js_loc_conf_t *conf; + njs_module_info_t info; + + conf = external; + + njs_memzero(&info, sizeof(njs_module_info_t)); + + info.name = *name; + + ret = ngx_js_module_lookup(conf, &info); + if (njs_slow_path(ret != NJS_OK)) { + return NULL; + } + + ret = ngx_js_module_read(njs_vm_memory_pool(vm), 
info.fd, &text); + + (void) close(info.fd); + + if (ret != NJS_OK) { + njs_vm_internal_error(vm, "while reading \"%V\" module", &info.file); + return NULL; + } + + start = text.start; + + module = njs_vm_compile_module(vm, &info.file, &start, + &text.start[text.length]); + + njs_mp_free(njs_vm_memory_pool(vm), text.start); + + return module; +} + + ngx_int_t ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_js_loc_conf_t *conf, njs_vm_opt_t *options) @@ -1723,7 +1913,7 @@ ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_ u_char *start, *end, *p; ngx_str_t *m, file; njs_int_t rc; - njs_str_t text, path; + njs_str_t text; ngx_uint_t i; njs_value_t *value; ngx_pool_cleanup_t *cln; @@ -1795,14 +1985,7 @@ ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_ njs_vm_set_rejection_tracker(conf->vm, ngx_js_rejection_tracker, NULL); - path.start = ngx_cycle->conf_prefix.data; - path.length = ngx_cycle->conf_prefix.len; - - rc = njs_vm_add_path(conf->vm, &path); - if (rc != NJS_OK) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, "failed to add \"js_path\""); - return NGX_ERROR; - } + njs_vm_set_module_loader(conf->vm, ngx_js_module_loader, conf); if (conf->paths != NGX_CONF_UNSET_PTR) { m = conf->paths->elts; @@ -1811,16 +1994,6 @@ ngx_js_init_conf_vm(ngx_conf_t *cf, ngx_ if (ngx_conf_full_name(cf->cycle, &m[i], 1) != NGX_OK) { return NGX_ERROR; } - - path.start = m[i].data; - path.length = m[i].len; - - rc = njs_vm_add_path(conf->vm, &path); - if (rc != NJS_OK) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "failed to add \"js_path\""); - return NGX_ERROR; - } } } diff -r 7eaaa7d57636 -r 9b3dac56fd8a src/njs.h --- a/src/njs.h Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs.h Tue Jan 23 16:34:10 2024 -0800 @@ -327,8 +327,6 @@ NJS_EXPORT njs_int_t njs_vm_invoke(njs_v */ NJS_EXPORT njs_int_t njs_vm_start(njs_vm_t *vm, njs_value_t *retval); -NJS_EXPORT njs_int_t njs_vm_add_path(njs_vm_t *vm, const njs_str_t *path); - #define NJS_PROTO_ID_ANY (-1) NJS_EXPORT njs_int_t njs_vm_external_prototype(njs_vm_t *vm, diff -r 7eaaa7d57636 -r 9b3dac56fd8a src/njs_module.c --- a/src/njs_module.c Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs_module.c Tue Jan 23 16:34:10 2024 -0800 @@ -8,197 +8,6 @@ #include -typedef struct { - int fd; - njs_str_t name; - njs_str_t file; - char path[NJS_MAX_PATH + 1]; -} njs_module_info_t; - - -static njs_int_t njs_module_lookup(njs_vm_t *vm, njs_module_info_t *info); -static njs_int_t njs_module_path(njs_vm_t *vm, const njs_str_t *dir, - njs_module_info_t *info); -static njs_int_t njs_module_read(njs_vm_t *vm, int fd, njs_str_t *body); -static njs_mod_t *njs_default_module_loader(njs_vm_t *vm, - njs_external_ptr_t external, njs_str_t *name); - - -njs_mod_t * -njs_parser_module(njs_parser_t *parser, njs_str_t *name) -{ - njs_mod_t *module; - njs_vm_t *vm; - njs_external_ptr_t external; - njs_module_loader_t loader; - - vm = parser->vm; - - if (name->length == 0) { - njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); - return NULL; - } - - module = njs_module_find(vm, name, 1); - if (module != NULL) { - goto done; - } - - external = NULL; - loader = njs_default_module_loader; - - if (vm->module_loader != NULL) { - loader = vm->module_loader; - external = vm->module_loader_opaque; - } - - module = loader(vm, external, name); - if (module == NULL) { - njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); - return NULL; - } - -done: - - if (module->index == 0) { - module->index = vm->shared->module_items++; - } - - return module; -} - - -static njs_int_t -njs_module_lookup(njs_vm_t *vm, 
njs_module_info_t *info) -{ - njs_int_t ret; - njs_str_t *path; - njs_uint_t i; - - if (info->name.start[0] == '/') { - return njs_module_path(vm, NULL, info); - } - - if (vm->paths == NULL) { - return NJS_DECLINED; - } - - path = vm->paths->start; - - for (i = 0; i < vm->paths->items; i++) { - ret = njs_module_path(vm, path, info); - - if (ret != NJS_DECLINED) { - return ret; - } - - path++; - } - - return NJS_DECLINED; -} - - -static njs_int_t -njs_module_path(njs_vm_t *vm, const njs_str_t *dir, njs_module_info_t *info) -{ - char *p; - size_t length; - njs_bool_t trail; - char src[NJS_MAX_PATH + 1]; - - trail = 0; - length = info->name.length; - - if (dir != NULL) { - length += dir->length; - - if (length == 0) { - return NJS_DECLINED; - } - - trail = (dir->start[dir->length - 1] != '/'); - - if (trail) { - length++; - } - } - - if (njs_slow_path(length > NJS_MAX_PATH)) { - return NJS_ERROR; - } - - p = &src[0]; - - if (dir != NULL) { - p = (char *) njs_cpymem(p, dir->start, dir->length); - - if (trail) { - *p++ = '/'; - } - } - - p = (char *) njs_cpymem(p, info->name.start, info->name.length); - *p = '\0'; - - p = realpath(&src[0], &info->path[0]); - if (p == NULL) { - return NJS_DECLINED; - } - - info->fd = open(&info->path[0], O_RDONLY); - if (info->fd < 0) { - return NJS_DECLINED; - } - - info->file.start = (u_char *) &info->path[0]; - info->file.length = njs_strlen(info->file.start); - - return NJS_OK; -} - - -static njs_int_t -njs_module_read(njs_vm_t *vm, int fd, njs_str_t *text) -{ - ssize_t n; - struct stat sb; - - text->start = NULL; - - if (fstat(fd, &sb) == -1) { - goto fail; - } - - if (!S_ISREG(sb.st_mode)) { - goto fail; - } - - text->length = sb.st_size; - - text->start = njs_mp_alloc(vm->mem_pool, text->length); - if (text->start == NULL) { - goto fail; - } - - n = read(fd, text->start, sb.st_size); - - if (n < 0 || n != sb.st_size) { - goto fail; - } - - return NJS_OK; - -fail: - - if (text->start != NULL) { - njs_mp_free(vm->mem_pool, text->start); - } - - return NJS_ERROR; -} - - static njs_int_t njs_module_hash_test(njs_lvlhsh_query_t *lhq, void *data) { @@ -348,42 +157,3 @@ njs_module_require(njs_vm_t *vm, njs_val return NJS_OK; } - - -static njs_mod_t * -njs_default_module_loader(njs_vm_t *vm, njs_external_ptr_t unused, - njs_str_t *name) -{ - u_char *start; - njs_int_t ret; - njs_str_t text; - njs_mod_t *module; - njs_module_info_t info; - - njs_memzero(&info, sizeof(njs_module_info_t)); - - info.name = *name; - - ret = njs_module_lookup(vm, &info); - if (njs_slow_path(ret != NJS_OK)) { - return NULL; - } - - ret = njs_module_read(vm, info.fd, &text); - - (void) close(info.fd); - - if (njs_slow_path(ret != NJS_OK)) { - njs_internal_error(vm, "while reading \"%V\" module", &info.file); - return NULL; - } - - start = text.start; - - module = njs_vm_compile_module(vm, &info.file, &start, - &text.start[text.length]); - - njs_mp_free(vm->mem_pool, text.start); - - return module; -} diff -r 7eaaa7d57636 -r 9b3dac56fd8a src/njs_module.h --- a/src/njs_module.h Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs_module.h Tue Jan 23 16:34:10 2024 -0800 @@ -19,7 +19,6 @@ struct njs_mod_s { njs_mod_t *njs_module_add(njs_vm_t *vm, njs_str_t *name, njs_value_t *value); njs_mod_t *njs_module_find(njs_vm_t *vm, njs_str_t *name, njs_bool_t shared); -njs_mod_t *njs_parser_module(njs_parser_t *parser, njs_str_t *name); njs_int_t njs_module_require(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); diff -r 7eaaa7d57636 -r 9b3dac56fd8a 
src/njs_parser.c --- a/src/njs_parser.c Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs_parser.c Tue Jan 23 16:34:10 2024 -0800 @@ -8123,6 +8123,45 @@ njs_parser_export_after(njs_parser_t *pa } +static njs_mod_t * +njs_parser_module(njs_parser_t *parser, njs_str_t *name) +{ + njs_vm_t *vm; + njs_mod_t *module; + + vm = parser->vm; + + if (name->length == 0) { + njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); + return NULL; + } + + module = njs_module_find(vm, name, 1); + if (module != NULL) { + goto done; + } + + if (vm->module_loader == NULL) { + njs_parser_syntax_error(parser, "Cannot load module \"%V\"", name); + return NULL; + } + + module = vm->module_loader(vm, vm->module_loader_opaque, name); + if (module == NULL) { + njs_parser_syntax_error(parser, "Cannot find module \"%V\"", name); + return NULL; + } + +done: + + if (module->index == 0) { + module->index = vm->shared->module_items++; + } + + return module; +} + + static njs_int_t njs_parser_import(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) diff -r 7eaaa7d57636 -r 9b3dac56fd8a src/njs_vm.c --- a/src/njs_vm.c Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs_vm.c Tue Jan 23 16:34:10 2024 -0800 @@ -716,29 +716,6 @@ njs_vm_set_rejection_tracker(njs_vm_t *v } -njs_int_t -njs_vm_add_path(njs_vm_t *vm, const njs_str_t *path) -{ - njs_str_t *item; - - if (vm->paths == NULL) { - vm->paths = njs_arr_create(vm->mem_pool, 4, sizeof(njs_str_t)); - if (njs_slow_path(vm->paths == NULL)) { - return NJS_ERROR; - } - } - - item = njs_arr_add(vm->paths); - if (njs_slow_path(item == NULL)) { - return NJS_ERROR; - } - - *item = *path; - - return NJS_OK; -} - - njs_value_t njs_vm_exception(njs_vm_t *vm) { diff -r 7eaaa7d57636 -r 9b3dac56fd8a src/njs_vm.h --- a/src/njs_vm.h Tue Jan 23 16:33:52 2024 -0800 +++ b/src/njs_vm.h Tue Jan 23 16:34:10 2024 -0800 @@ -118,7 +118,6 @@ typedef enum { struct njs_vm_s { njs_value_t exception; - njs_arr_t *paths; njs_arr_t *protos; njs_arr_t *scope_absolute; From mdounin at mdounin.ru Wed Jan 24 01:23:52 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Jan 2024 04:23:52 +0300 Subject: [PATCH] Documented opensourcing of the OTel module In-Reply-To: <00807e94be3622a79d77.1706017747@ORK-ML-00007151> References: <00807e94be3622a79d77.1706017747@ORK-ML-00007151> Message-ID: Hello! On Tue, Jan 23, 2024 at 01:49:07PM +0000, Yaroslav Zhuravlev wrote: > xml/en/docs/index.xml | 8 +++++++- > xml/en/docs/ngx_otel_module.xml | 20 +++++++++++++++----- > xml/ru/docs/index.xml | 10 ++++++++-- > 3 files changed, 30 insertions(+), 8 deletions(-) > > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1704815768 0 > # Tue Jan 09 15:56:08 2024 +0000 > # Node ID 00807e94be3622a79d7796be6ea11934f97b2662 > # Parent e3116677300fa455200da63002c746aece689029 > Documented opensourcing of the OTel module. > > diff --git a/xml/en/docs/index.xml b/xml/en/docs/index.xml > --- a/xml/en/docs/index.xml > +++ b/xml/en/docs/index.xml > @@ -8,7 +8,7 @@ >
link="/en/docs/" > lang="en" > - rev="49" > + rev="50" > toc="no"> > > > @@ -681,6 +681,12 @@ > ngx_mgmt_module > > > + > + > + > + > + > + > > > ngx_otel_module > diff --git a/xml/en/docs/ngx_otel_module.xml b/xml/en/docs/ngx_otel_module.xml > --- a/xml/en/docs/ngx_otel_module.xml > +++ b/xml/en/docs/ngx_otel_module.xml > @@ -9,12 +9,14 @@ > link="/en/docs/ngx_otel_module.html" > lang="en" > - rev="1"> > + rev="2"> > >
> > > -The ngx_otel_module module (1.23.4) provides > +The ngx_otel_module module (1.23.4) is nginx-authored Quoting from https://mailman.nginx.org/pipermail/nginx-devel/2023-October/4AGH5XVKNP6UDFE32PZIXYO7JQ4RE37P.html: : Note that "nginx-authored" here looks misleading, as no nginx core : developers work on this module. > +third-party module > +that provides > OpenTelemetry > distributed tracing support. > The module supports > @@ -23,12 +25,20 @@ > > > > +The module is open source since 1.25.2. > +Download and install instructions are available > +here. > +The module is also available as a prebuilt > +nginx-module-otel dynamic module > +package (1.25.4). > + > + > + > > This module is available as part of our > commercial subscription > -in nginx-plus-module-otel package. > -After installation, the module can be loaded > -dynamically. > +(the > +nginx-plus-module-otel package). I don't see a reason to provide additional links here. Rather, the note probably can be removed altogether, or changed to something like "In previous versions, this module was available...". > > > > diff --git a/xml/ru/docs/index.xml b/xml/ru/docs/index.xml > --- a/xml/ru/docs/index.xml > +++ b/xml/ru/docs/index.xml > @@ -8,7 +8,7 @@ >
link="/ru/docs/" > lang="ru" > - rev="49" > + rev="50" > toc="no"> > > > @@ -687,9 +687,15 @@ > ngx_mgmt_module [en] > > > + > + > + > + > + > + > > > -ngx_otel_module [en] > +ngx_otel_module [en] > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Wed Jan 24 01:24:33 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Jan 2024 04:24:33 +0300 Subject: [PATCH] Year 2024 In-Reply-To: <9fa58eaa9b8fd4dcd967.1706014607@ORK-ML-00007151> References: <9fa58eaa9b8fd4dcd967.1706014607@ORK-ML-00007151> Message-ID: Hello! On Tue, Jan 23, 2024 at 12:56:47PM +0000, Yaroslav Zhuravlev wrote: > GNUmakefile | 2 +- > text/LICENSE | 2 +- > xml/menu.xml | 1 + > 3 files changed, 3 insertions(+), 2 deletions(-) > > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1706014530 0 > # Tue Jan 23 12:55:30 2024 +0000 > # Node ID 9fa58eaa9b8fd4dcd9677d37a86140f945eaf7c6 > # Parent 87d313e1bf7f365ac81693aae5d83907869774cb > Year 2024. > > diff --git a/GNUmakefile b/GNUmakefile > --- a/GNUmakefile > +++ b/GNUmakefile > @@ -71,7 +71,7 @@ > > YEARS = \ > 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 \ > - 2020 2021 2022 > + 2020 2021 2022 2023 > > all: news arx 404 $(LANGS) > > diff --git a/text/LICENSE b/text/LICENSE > --- a/text/LICENSE > +++ b/text/LICENSE > @@ -1,6 +1,6 @@ > /* > * Copyright (C) 2002-2021 Igor Sysoev > - * Copyright (C) 2011-2023 Nginx, Inc. > + * Copyright (C) 2011-2024 Nginx, Inc. > * All rights reserved. > * > * Redistribution and use in source and binary forms, with or without > diff --git a/xml/menu.xml b/xml/menu.xml > --- a/xml/menu.xml > +++ b/xml/menu.xml > @@ -59,6 +59,7 @@ > --> > > news > + > > > Looks good. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Wed Jan 24 08:57:16 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Jan 2024 11:57:16 +0300 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: Message-ID: Hello! On Wed, Jan 24, 2024 at 12:09:02AM +0000, Ben Kallus wrote: > > As already pointed out previously, there are no known cases > > when memcpy(p, NULL, 0) can result in miscompilation of nginx > > code, ... If you think there are cases when the code can be > > miscompiled in practice, and not theoretically, please share. > > There is no such thing as "miscompilation" of code that executes > undefined behavior. The behavior is undefined; literally any > instructions that the compiler emits is correct compilation. This is > the definition of undefined behavior. While it is certainly true, this is purely theoretical. In practice, real things happen: the code is compiled even to something that is either correct or not. In all known cases so far the compiled code is correct even if GCC with relevant (mis)optimizations enabled is used. > You want me to cite a line in nginx that you would consider > "miscompiled in practice." I'm not going to spend hours combing > through assembly to convince you that undefined behavior is worth > avoiding. Sorry! There is no need to convince me that undefined behaviour worth avoiding. The point is that patching it in the particular place you are trying to patch will make things worse by reducing pressure on developers, not better. And there is no immediate need to patch this particular place. 
Instead, we should ensure that a safe coding pattern is used across the code - either by patching ngx_memcpy() and other functions to check for length 0 (as sketched above), or by reviewing and fixing all the affected calls we are able to find, or by explicitly asking GCC to avoid such misoptimizations (such as with -fno-delete-null-pointer-checks, as the Linux kernel does), or by fixing the standard. [...]

> > as nginx usually does not checks string data pointers
> > against NULL (and instead checks length, if needed). In
> > particular, ngx_pstrdup() you are trying to patch doesn't. That
> > is, this is exactly the "no direct impact" situation as assumed
> > above.
>
> It is non-obvious that checks against NULL will be optimized away
> after calls to ngx_memcpy. Whether a function even calls ngx_memcpy on
> a given pointer may not always be obvious, especially if that call
> happens many layers down the stack.

In the particular place you are trying to patch, it is quite obvious, even assuming link-time optimizations, since ngx_pstrdup() is called in very few places. And this can be easily verified at least for a particular compiler and compiler options.

For other places, that indeed might not be obvious (but see below), and that's why I am asking you to share cases of nginx code being miscompiled if you know of any. The point is, however, that the change you suggest, patching just ngx_pstrdup(), won't fix these other places. Instead, it will rather ensure these other places are never fixed.

OTOH, gcc13 with -O2 removes very few NULL pointer checks across the nginx codebase. In my tests (with "-S" added to compilation to obtain assembler output), -fno-delete-null-pointer-checks affects only 31 files, and the files I checked have only a few pointer checks removed (restored with -fno-delete-null-pointer-checks):

$ diff -urN objs.o2/src/ objs.o2fnodelete/src/ | diffstat
 core/ngx_inet.o | 703 +--
 core/ngx_log.o | 555 +--
 core/ngx_open_file_cache.o | 581 +--
 core/ngx_output_chain.o | 771 ++--
 core/ngx_palloc.o | 153
 core/ngx_radix_tree.o | 327 -
 core/ngx_resolver.o | 2252 ++++++------
 event/ngx_event.o | 850 ++--
 event/ngx_event_openssl.o | 3576 ++++++++++---------
 event/ngx_event_openssl_stapling.o | 6
 event/ngx_event_timer.o | 18
 event/ngx_event_udp.o | 127
 http/modules/ngx_http_auth_basic_module.o | 12
 http/modules/ngx_http_charset_filter_module.o | 7
 http/modules/ngx_http_fastcgi_module.o | 2134 +++++------
 http/modules/ngx_http_geo_module.o | 136
 http/modules/ngx_http_image_filter_module.o | 186 -
 http/modules/ngx_http_limit_conn_module.o | 103
 http/modules/ngx_http_log_module.o | 948 ++---
 http/modules/ngx_http_secure_link_module.o | 34
 http/modules/ngx_http_slice_filter_module.o | 77
 http/modules/ngx_http_sub_filter_module.o | 126
 http/ngx_http_file_cache.o | 118
 http/ngx_http_parse.o | 842 ++--
 http/ngx_http_script.o | 16
 http/ngx_http_upstream.o | 4673 +++++++++++++-------------
 stream/ngx_stream_geo_module.o | 134
 stream/ngx_stream_limit_conn_module.o | 105
 stream/ngx_stream_log_module.o | 872 ++--
 stream/ngx_stream_proxy_module.o | 749 ++--
 stream/ngx_stream_script.o | 16
 31 files changed, 10658 insertions(+), 10549 deletions(-)

In particular, the only real change in the ngx_palloc.o assembler is as follows:

@@ -452,16 +452,19 @@ ngx_reset_pool:
 testl %ebx, %ebx
 jne .L99
 .L97:
+ testl %esi, %esi
+ je .L100
 movl %esi, %eax
 .p2align 4,,10
 .p2align 3

Which corresponds to the initial "p" check restored with -fno-delete-null-pointer-checks on the first iteration of the second loop, due to "pool->large" being used in the first loop
initialization: for (l = pool->large; l; l = l->next) { if (l->alloc) { ngx_free(l->alloc); } } for (p = pool; p; p = p->d.next) { p->d.last = (u_char *) p + sizeof(ngx_pool_t); p->d.failed = 0; } Many other cases, such as multiple changes in ngx_inet.o, one of the two changes in ngx_open_file_cache.o, and one change ngx_http_parse.o, correspond to ngx_strlchr() calls, where subsequent result check is omitted because no-match is directly handled by the inlined ngx_strlchr() code: p = ngx_strlchr(uri->data, last, '?'); if (p) { ... } And there seems to be no real changes in ngx_http_script.o and ngx_stream_script.o, just some minor instructions reordering. Overall, it might be feasible to review all the differences to ensure there are no real issues introduced by various compiler optimizations. [...] > There is a proposal for C2y to define memcpy(NULL,NULL,0): > https://docs.google.com/document/d/1guH_HgibKrX7t9JfKGfWX2UCPyZOTLsnRfR6UleD1F8/edit > If you feel strongly that memcpy from NULL should be defined, feel > free to contribute to it :) Interesting, thanks. This would be the best solution and will eliminate the need for any changes by defining the behaviour, as it should. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Wed Jan 24 09:20:59 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Jan 2024 12:20:59 +0300 Subject: [PATCH] SSL: Added SSLKEYLOGFILE key material to debug logging In-Reply-To: References: Message-ID: Hello! On Sun, Jan 21, 2024 at 10:37:24AM +0000, J Carter wrote: > # HG changeset patch > # User J Carter > # Date 1705832811 0 > # Sun Jan 21 10:26:51 2024 +0000 > # Node ID b00332a5253eefb53bacc024c72f55876c2eac6e > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > SSL: Added SSLKEYLOGFILE key material to debug logging. > > This patch also introduces the debug_keylog error log level flag, which > may be used to graunually enable or ommit logging of key material via > error level flags (note, it's always enabled when using > debug_connection). > > Each line of key material is output to the error log as separate log > message, and is prepended with 'ssl keylog: ' for convenient extraction. > > The purpose of logging key material is to allow external tools, such as > wireshark/tshark, to decrypt captured TLS connections in all situations. > > Previously, only TLS 1.2 (and below) connections could be decrypted > when specific ciphers suites were used, and when the decrypter had > access to the acting server's TLS certificates and keys. It was not > possible to decrypt TLS 1.3 traffic without generating SSLKEYLOGFILE on > peer, or by using other hacks on nginx host (using GDB, or patched ssl > libraries). Thanks for the patch. Logging session keying material is known to be problematic from ethical point of view. As such, I would rather avoid introducing relevant functionality in nginx. [...] -- Maxim Dounin http://mdounin.ru/ From stephen.farrell at cs.tcd.ie Wed Jan 24 10:51:45 2024 From: stephen.farrell at cs.tcd.ie (Stephen Farrell) Date: Wed, 24 Jan 2024 10:51:45 +0000 Subject: [PATCH] SSL: Added SSLKEYLOGFILE key material to debug logging In-Reply-To: References: Message-ID: <3efc7b41-176c-45ed-8a1e-1231268a0a99@cs.tcd.ie> Chiming in from the sidelines... On 24/01/2024 09:20, Maxim Dounin wrote: > Logging session keying material is known to be problematic from > ethical point of view. As such, I would rather avoid introducing > relevant functionality in nginx. I agree the above (not adding this) would be the better outcome. Cheers, S. 
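For context on the mechanism being debated: such a patch builds on OpenSSL's keylog callback (available since OpenSSL 1.1.1), which hands the application one SSLKEYLOGFILE-format line per derived secret. A minimal sketch, with illustrative names only - this is not the proposed nginx code:

```c
#include <stdio.h>
#include <openssl/ssl.h>

/*
 * Illustrative only: OpenSSL invokes the callback once per line of
 * SSLKEYLOGFILE-format key material as secrets are derived; here
 * each line is simply prefixed and written to stderr.
 */
static void
example_keylog_cb(const SSL *ssl, const char *line)
{
    (void) ssl;
    fprintf(stderr, "ssl keylog: %s\n", line);
}

static void
example_enable_keylog(SSL_CTX *ctx)
{
    SSL_CTX_set_keylog_callback(ctx, example_keylog_cb);
}
```

Lines in this format can be fed to tools such as wireshark/tshark to decrypt captured TLS 1.3 sessions, which is the capability being weighed above.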
-------------- next part -------------- A non-text attachment was scrubbed... Name: OpenPGP_0xE4D8E9F997A833DD.asc Type: application/pgp-keys Size: 1197 bytes Desc: OpenPGP public key URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: OpenPGP_signature.asc Type: application/pgp-signature Size: 236 bytes Desc: OpenPGP digital signature URL: From jordanc.carter at outlook.com Wed Jan 24 12:17:08 2024 From: jordanc.carter at outlook.com (J Carter) Date: Wed, 24 Jan 2024 12:17:08 +0000 Subject: [PATCH] SSL: Added SSLKEYLOGFILE key material to debug logging In-Reply-To: References: Message-ID: Hello, Thanks for the feedback. On Wed, 24 Jan 2024 12:20:59 +0300 Maxim Dounin wrote: > Hello! > > On Sun, Jan 21, 2024 at 10:37:24AM +0000, J Carter wrote: > > > # HG changeset patch > > # User J Carter > > # Date 1705832811 0 > > # Sun Jan 21 10:26:51 2024 +0000 > > # Node ID b00332a5253eefb53bacc024c72f55876c2eac6e > > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > > SSL: Added SSLKEYLOGFILE key material to debug logging. > > > > This patch also introduces the debug_keylog error log level flag, which > > may be used to graunually enable or ommit logging of key material via > > error level flags (note, it's always enabled when using > > debug_connection). > > > > Each line of key material is output to the error log as separate log > > message, and is prepended with 'ssl keylog: ' for convenient extraction. > > > > The purpose of logging key material is to allow external tools, such as > > wireshark/tshark, to decrypt captured TLS connections in all situations. > > > > Previously, only TLS 1.2 (and below) connections could be decrypted > > when specific ciphers suites were used, and when the decrypter had > > access to the acting server's TLS certificates and keys. It was not > > possible to decrypt TLS 1.3 traffic without generating SSLKEYLOGFILE on > > peer, or by using other hacks on nginx host (using GDB, or patched ssl > > libraries). > > Thanks for the patch. > > Logging session keying material is known to be problematic from > ethical point of view. As such, I would rather avoid introducing > relevant functionality in nginx. > > [...] > Could you expand upon your ethical concerns around logging key material over say logging / storing to disk request or response content directly from nginx ? It'd be good to have clarity for future contributions. From pluknet at nginx.com Wed Jan 24 16:15:58 2024 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 24 Jan 2024 20:15:58 +0400 Subject: [PATCH 1 of 2] Fixed build warnings "failed to load external entity" Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1706112528 -14400 # Wed Jan 24 20:08:48 2024 +0400 # Node ID e798f596b284259985e35f1486b31004fae00d4d # Parent e3116677300fa455200da63002c746aece689029 Fixed build warnings "failed to load external entity". 
diff --git a/xml/ru/docs/njs/index.xml b/xml/ru/docs/njs/index.xml --- a/xml/ru/docs/njs/index.xml +++ b/xml/ru/docs/njs/index.xml @@ -50,7 +50,7 @@ njs совместим с - +Безопасность [en] From pluknet at nginx.com Wed Jan 24 16:15:59 2024 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 24 Jan 2024 20:15:59 +0400 Subject: [PATCH 2 of 2] Fixed dirindex and varindex to output top level module name In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1706112783 -14400 # Wed Jan 24 20:13:03 2024 +0400 # Node ID cf7c914743d1d549af23a9f39e0a70377432fcf4 # Parent e798f596b284259985e35f1486b31004fae00d4d Fixed dirindex and varindex to output top level module name. diff --git a/xsls/link.xsls b/xsls/link.xsls --- a/xsls/link.xsls +++ b/xsls/link.xsls @@ -50,7 +50,11 @@ X:template = "links" { !{@id} X:if "count(../link[@id = current()/@id]) > 1" { X:text{ (} - !{substring-before(substring-after(@doc, '/'), '.xml')} + X:if "contains(@doc, '/')" { + !{substring-before(substring-after(@doc, '/'), '.xml')} + } else { + !{substring-before(@doc, '.xml')} + } X:text{)} }
@@ -62,7 +66,11 @@ X:template = "varlinks" { !!; X:if "count(../link[@id = current()/@id and @doc != current()/@doc]) > 0" { X:text{ (} - !{substring-before(substring-after(@doc, '/'), '.xml')} + X:if "contains(@doc, '/')" { + !{substring-before(substring-after(@doc, '/'), '.xml')} + } else { + !{substring-before(@doc, '.xml')} + } X:text{)} }
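To make the intent of the XSLT change concrete, the same derivation is sketched below in C (a hypothetical doc_label() helper, assuming the same rules as the stylesheet): a @doc value with a directory component, such as "http/ngx_http_core_module.xml", behaves as before, while a top-level value such as "ngx_otel_module.xml" previously yielded an empty name because substring-after(@doc, '/') matched nothing.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical C rendering of the fixed logic: strip an optional
 * leading directory, then drop the ".xml" suffix.
 */
static void
doc_label(const char *doc, char *out, size_t outlen)
{
    const char *start, *dot;
    size_t n;

    start = strchr(doc, '/');
    start = (start != NULL) ? start + 1 : doc;    /* new fallback branch */

    dot = strstr(start, ".xml");
    n = (dot != NULL) ? (size_t) (dot - start) : strlen(start);

    if (n >= outlen) {
        n = outlen - 1;
    }

    memcpy(out, start, n);
    out[n] = '\0';
}

int
main(void)
{
    char buf[64];

    doc_label("http/ngx_http_core_module.xml", buf, sizeof(buf));
    printf("%s\n", buf);    /* ngx_http_core_module */

    doc_label("ngx_otel_module.xml", buf, sizeof(buf));
    printf("%s\n", buf);    /* ngx_otel_module (was empty before) */

    return 0;
}
```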
From Austin.Mayerhofer at forcepoint.com Wed Jan 24 21:15:00 2024 From: Austin.Mayerhofer at forcepoint.com (Mayerhofer, Austin) Date: Wed, 24 Jan 2024 21:15:00 +0000 Subject: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely Message-ID: Hi all, Apologies if I sent this twice, I don’t think the first one went through because I wasn’t subscribed to the list. nginx-tests’ stream_ssl_conf_command.t is hanging for me and not running to completion, I’m using the following configuration: OS: MacOS 12.6.3 Chip: Apple M1 Max NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module --with-http_stub_status_module --with-http_v2_module --without-http_auth_basic_module --without-http_autoindex_module --without-http_browser_module --without-http-cache --without-http_charset_module --without-http_empty_gif_module --without-http_fastcgi_module --without-http_grpc_module --without-http_limit_conn_module --without-http_limit_req_module --without-http_memcached_module --without-http_referer_module --without-http_scgi_module --without-http_split_clients_module --without-http_ssi_module --without-http_upstream_hash_module --without-http_upstream_ip_hash_module --without-http_upstream_least_conn_module --without-http_userid_module --without-http_uwsgi_module --with-stream --with-stream_ssl_module --with-stream_ssl_preread_module --without-stream_limit_conn_module --without-stream_set_module --without-stream_split_clients_module --without-stream_upstream_hash_module --without-stream_upstream_least_conn_module --without-stream_upstream_zone_module nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 When I run: ``` TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v stream_ssl_conf_command.t ``` The output is: ``` stream_ssl_conf_command.t .. 1..5 ok 1 - Certificate ok 2 - SessionTicket ok 3 – ServerPreference ``` And it hangs there. It seems to be something with the ServerPreference test, as if I remove this code, it does run to completion: ``` $s = stream( PeerAddr => '127.0.0.1:' . port(8443), SSL => 1, SSL_cipher_list => 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384' ); is($s->socket()->get_cipher(), 'ECDHE-RSA-AES128-GCM-SHA256', 'ServerPreference'); ``` These are the last 20 lines of error.log, but I didn’t notice anything out of the ordinary in the ~200 total lines in the file, just that it’s hanging. 
2024/01/24 11:38:08 [debug] 98386#0: *2 reusable connection: 0 2024/01/24 11:38:08 [debug] 98386#0: *2 free: 000000011CE04080, unused: 0 2024/01/24 11:38:08 [debug] 98386#0: *2 free: 000000011CE04180, unused: 0 2024/01/24 11:38:08 [debug] 98386#0: *2 free: 000000011CE04280, unused: 32 2024/01/24 11:38:08 [debug] 98386#0: timer delta: 0 2024/01/24 11:38:08 [debug] 98386#0: worker cycle 2024/01/24 11:38:08 [debug] 98386#0: kevent timer: 3000, changes: 0 2024/01/24 11:38:08 [debug] 98386#0: kevent events: 1 2024/01/24 11:38:08 [debug] 98386#0: kevent: 3: ft:-1 fl:0025 ff:00000000 d:31 ud:00000001200280D1 2024/01/24 11:38:08 [debug] 98386#0: *3 SSL shutdown handler 2024/01/24 11:38:08 [debug] 98386#0: *3 SSL_shutdown: 1 2024/01/24 11:38:08 [debug] 98386#0: *3 close stream connection: 3 2024/01/24 11:38:08 [debug] 98386#0: *3 event timer del: 3: 422265465 2024/01/24 11:38:08 [debug] 98386#0: *3 reusable connection: 0 2024/01/24 11:38:08 [debug] 98386#0: *3 free: 000000011CE04A20, unused: 0 2024/01/24 11:38:08 [debug] 98386#0: *3 free: 000000011CE04B20, unused: 0 2024/01/24 11:38:08 [debug] 98386#0: *3 free: 000000011CE04CC0, unused: 32 2024/01/24 11:38:08 [debug] 98386#0: timer delta: 0 2024/01/24 11:38:08 [debug] 98386#0: worker cycle 2024/01/24 11:38:08 [debug] 98386#0: kevent timer: -1, changes: 0 Has anyone else run into this problem? I searched the nginx and nginx-devel mailing lists and didn’t see anything. Thank you for any help! This message has been scanned for malware by Forcepoint. www.forcepoint.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Wed Jan 24 22:59:28 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 25 Jan 2024 02:59:28 +0400 Subject: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely In-Reply-To: References: Message-ID: > On 25 Jan 2024, at 01:15, Mayerhofer, Austin via nginx-devel wrote: > > Hi all, > Apologies if I sent this twice, I don’t think the first one went through because I wasn’t subscribed to the list. > nginx-tests’ stream_ssl_conf_command.t is hanging for me and not running to completion, I’m using the following configuration: > OS: MacOS 12.6.3 > Chip: Apple M1 Max > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module --with-http_stub_status_module --with-http_v2_module --without-http_auth_basic_module --without-http_autoindex_module --without-http_browser_module --without-http-cache --without-http_charset_module --without-http_empty_gif_module --without-http_fastcgi_module --without-http_grpc_module --without-http_limit_conn_module --without-http_limit_req_module --without-http_memcached_module --without-http_referer_module --without-http_scgi_module --without-http_split_clients_module --without-http_ssi_module --without-http_upstream_hash_module --without-http_upstream_ip_hash_module --without-http_upstream_least_conn_module --without-http_userid_module --without-http_uwsgi_module --with-stream --with-stream_ssl_module --with-stream_ssl_preread_module --without-stream_limit_conn_module --without-stream_set_module --without-stream_split_clients_module --without-stream_upstream_hash_module --without-stream_upstream_least_conn_module --without-stream_upstream_zone_module nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > When I run: > ``` > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v stream_ssl_conf_command.t > ``` > The output is: > ``` > stream_ssl_conf_command.t .. 
> 1..5 > ok 1 - Certificate > ok 2 - SessionTicket > ok 3 – ServerPreference > ``` > And it hangs there. Perl from macOS base system (/usr/bin/perl) is known to be buggy for some unknown reason. This is expressed in various hangs when running nginx-tests. I saw similar reports since at least macOS 12.3.1, and this is not caused by a particular Perl version (same version from macports works for me). You can try Perl from homebrew or macports collections, it should work just fine. -- Sergey Kandaurov From benjamin.p.kallus.gr at dartmouth.edu Thu Jan 25 03:21:16 2024 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Thu, 25 Jan 2024 03:21:16 +0000 Subject: [PATCH] Enforce that CR precede LF in chunk lines Message-ID: The HTTP/1.1 standards allow the recognition of bare LF as a line terminator in some contexts. >From RFC 9112 Section 2.2: > Although the line terminator for the start-line and fields is the sequence CRLF, a recipient MAY recognize a single LF as a line terminator and ignore any preceding CR. >From RFC 7230 Section 3.5: > Although the line terminator for the start-line and header fields is > the sequence CRLF, a recipient MAY recognize a single LF as a line > terminator and ignore any preceding CR. >From RFC 2616 Section 19.3: > The line terminator for message-header fields is the sequence CRLF. > However, we recommend that applications, when parsing such headers, > recognize a single LF as a line terminator and ignore the leading CR. In summary, bare LF can be recognized as a line terminator for a field-line (i.e. a header or trailer) or a start-line, but not outside of these contexts. In particular, bare LF is not an acceptable line terminator for chunk data lines or chunk size lines. One of the rejection messages for an RFC 9112 errata report makes the reasoning behind this choice clear: > The difference was intentional. A chunked parser is not a start line or field parser (it is a message body parser) and it is supposed to be less forgiving because it does not have to retain backwards compatibility with 1.0 parsers. > Hence, bare LF around the chunk sizes would be invalid and should result in the connection being marked as invalid. Currently, Nginx allows chunk lines (both size and data) to use bare LF as a line terminator. This means that (for example) the following payload is erroneously accepted: ``` POST / HTTP/1.1\r\n Host: whatever\r\n Transfer-Encoding: chunked\r\n 0\n # <--- This is missing a \r \r\n ``` This is probably not such a big deal, but it is a standards violation, and it comes with one small consequence: chunk lengths that are off by one will not invalidate the message body. I've developed a few request smuggling exploits against other servers and proxies in the past that rely upon the attacker's ability to predict the length of a request after it has passed through a reverse proxy. This is usually straightforward, but if there are any unpredictable headers inserted by the proxy, getting the guess right becomes nontrivial. Being able to be off by one thus makes the attacker's job a little bit easier. Given that many popular HTTP implementations (Apache httpd, Node, Boost::Beast, Lighttpd) adhere to the standard on this line termination issue, we should expect this change to break almost no clients, since any client generating requests that terminate chunk lines with bare LF would already be incompatible with a large portion of the web. 
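To illustrate the rule in question, here is a strict chunk-size-line parser sketch in C - a hypothetical parse_chunk_size_line() helper, not nginx's ngx_http_parse_chunked() - which accepts only CRLF as the line terminator (hex-overflow and chunk-extension handling are omitted for brevity):

```c
#include <stddef.h>

/*
 * Returns the number of bytes consumed on success, or -1 if the line
 * is not a hex chunk size terminated by exactly CRLF. A bare LF, or
 * a CR not followed by LF, is rejected.
 */
static int
parse_chunk_size_line(const unsigned char *p, size_t len, size_t *size)
{
    size_t i, n;

    n = 0;

    for (i = 0; i < len; i++) {
        unsigned char c = p[i];

        if (c >= '0' && c <= '9') {
            n = n * 16 + (c - '0');
        } else if (c >= 'a' && c <= 'f') {
            n = n * 16 + (c - 'a' + 10);
        } else if (c >= 'A' && c <= 'F') {
            n = n * 16 + (c - 'A' + 10);
        } else {
            break;
        }
    }

    if (i == 0 || i + 2 > len) {
        return -1;      /* no hex digits, or terminator missing */
    }

    if (p[i] != '\r' || p[i + 1] != '\n') {
        return -1;      /* anything but CRLF (e.g. bare LF) fails */
    }

    *size = n;

    return (int) (i + 2);
}
```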
It is, however, also true that many HTTP implementations (Go net/http, H2O, LiteSpeed) exhibit the same behavior as Nginx, and it's probably worth exploring why that is. The following patch changes Nginx's parsing behavior to match the standard. Note that this patch does not stop Nginx from allowing bare LF in request lines, response lines, headers, or trailers. It stops Nginx from accepting bare LF only in chunk size and data lines, where the standard does not permit LF/CRLF permissiveness. It's also a delete-only patch, which is always nice :) If you all are open to this change, it will also be necessary to fix up the many LF-delimited chunks that are present within the test suite. diff -r ee40e2b1d083 src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/http/ngx_http_parse.c Wed Jan 24 18:11:50 2024 +0000 @@ -2217,9 +2217,6 @@ case CR: state = sw_last_chunk_extension_almost_done; break; - case LF: - state = sw_trailer; - break; case ';': case ' ': case '\t': @@ -2236,9 +2233,6 @@ case CR: state = sw_chunk_extension_almost_done; break; - case LF: - state = sw_chunk_data; - break; case ';': case ' ': case '\t': @@ -2255,8 +2249,6 @@ case CR: state = sw_chunk_extension_almost_done; break; - case LF: - state = sw_chunk_data; } break; @@ -2276,9 +2268,6 @@ case CR: state = sw_after_data_almost_done; break; - case LF: - state = sw_chunk_start; - break; default: goto invalid; } @@ -2296,8 +2285,6 @@ case CR: state = sw_last_chunk_extension_almost_done; break; - case LF: - state = sw_trailer; } break; From hongzhidao at gmail.com Thu Jan 25 03:53:00 2024 From: hongzhidao at gmail.com (=?UTF-8?B?5rSq5b+X6YGT?=) Date: Thu, 25 Jan 2024 11:53:00 +0800 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: Message-ID: Hi, Here's a similar ticket in another OSS. https://github.com/bellard/quickjs/issues/225#issuecomment-1908279228 > QuickJS may pass NULL pointers to memcpy with zero size. The C spec tells it is an undefined behavior but most C code do it, so the spec should be fixed instead. On Wed, Jan 24, 2024 at 4:57 PM Maxim Dounin wrote: > Hello! > > On Wed, Jan 24, 2024 at 12:09:02AM +0000, Ben Kallus wrote: > > > > As already pointed out previously, there are no known cases > > > when memcpy(p, NULL, 0) can result in miscompilation of nginx > > > code, ... If you think there are cases when the code can be > > > miscompiled in practice, and not theoretically, please share. > > > > There is no such thing as "miscompilation" of code that executes > > undefined behavior. The behavior is undefined; literally any > > instructions that the compiler emits is correct compilation. This is > > the definition of undefined behavior. > > While it is certainly true, this is purely theoretical. In > practice, real things happen: the code is compiled even to > something that is either correct or not. In all known cases so > far the compiled code is correct even if GCC with relevant > (mis)optimizations enabled is used. > > > You want me to cite a line in nginx that you would consider > > "miscompiled in practice." I'm not going to spend hours combing > > through assembly to convince you that undefined behavior is worth > > avoiding. Sorry! > > There is no need to convince me that undefined behaviour worth > avoiding. The point is that patching it in the particular place > you are trying to patch will make things worse by reducing > pressure on developers, not better. And there is no immediate > need to patch this particular place. 
> > Instead, we should ensure that a safe coding pattern is used > across the code - either by patching ngx_memcpy() and other > functions to check for length 0, or by reviewing and fixing all > the affected calls we are able to find, or by explicitly asking > GCC to avoid such misoptimizations (such as with > -fno-delete-null-pointer-check like Linux kernel does), or by > fixing the standard. > > [...] > > > > as nginx usually does not checks string data pointers > > > against NULL (and instead checks length, if needed). In > > > particular, ngx_pstrdup() you are trying to patch doesn't. That > > > is, this is exactly the "no direct impact" situation as assumed > > > above. > > > > It is non-obvious that checks against NULL will be optimized away > > after calls to ngx_memcpy. Whether a function even calls ngx_memcpy on > > a given pointer may not always be obvious, especially if that call > > happens many layers down the stack. > > In the particular place you are trying to patch, it is quite > obvious, even assuming link-time optimizations, since > ngx_pstrdup() is called in very few places. And this can be > easily verified at least for a particular compiler and compiler > options. > > For other places, that's indeed might not be obvious (but see > below), and that's why I asking you to share cases of the nginx > code miscompiled if you know any. The point is, however, that the > change you suggests with patching just ngx_pstrdup() won't fix > these other places. Instead, it will rather ensure these other > places are never fixed. > > OTOH, gcc13 with -O2 removes very few NULL pointer checks across > nginx codebase. In my tests (with "-S" added to compilation to > obtain assembler output), -fno-delete-null-pointer-check affects > only 31 files, and files I checked only have few pointer checks > removed (restored with -fno-delete-null-pointer-check): > > $ diff -urN objs.o2/src/ objs.o2fnodelete/src/ | diffstat > core/ngx_inet.o | 703 +-- > core/ngx_log.o | 555 +-- > core/ngx_open_file_cache.o | 581 +-- > core/ngx_output_chain.o | 771 ++-- > core/ngx_palloc.o | 153 > core/ngx_radix_tree.o | 327 - > core/ngx_resolver.o | 2252 ++++++------ > event/ngx_event.o | 850 ++-- > event/ngx_event_openssl.o | 3576 ++++++++++--------- > event/ngx_event_openssl_stapling.o | 6 > event/ngx_event_timer.o | 18 > event/ngx_event_udp.o | 127 > http/modules/ngx_http_auth_basic_module.o | 12 > http/modules/ngx_http_charset_filter_module.o | 7 > http/modules/ngx_http_fastcgi_module.o | 2134 +++++------ > http/modules/ngx_http_geo_module.o | 136 > http/modules/ngx_http_image_filter_module.o | 186 - > http/modules/ngx_http_limit_conn_module.o | 103 > http/modules/ngx_http_log_module.o | 948 ++--- > http/modules/ngx_http_secure_link_module.o | 34 > http/modules/ngx_http_slice_filter_module.o | 77 > http/modules/ngx_http_sub_filter_module.o | 126 > http/ngx_http_file_cache.o | 118 > http/ngx_http_parse.o | 842 ++-- > http/ngx_http_script.o | 16 > http/ngx_http_upstream.o | 4673 > +++++++++++++------------- > stream/ngx_stream_geo_module.o | 134 > stream/ngx_stream_limit_conn_module.o | 105 > stream/ngx_stream_log_module.o | 872 ++-- > stream/ngx_stream_proxy_module.o | 749 ++-- > stream/ngx_stream_script.o | 16 > 31 files changed, 10658 insertions(+), 10549 deletions(-) > > In particular, in the only real change in ngx_palloc.o assembler > is as follows: > > @@ -452,16 +452,19 @@ ngx_reset_pool: > testl %ebx, %ebx > jne .L99 .L97: > + testl %esi, %esi > + je .L100 > movl %esi, %eax > .p2align 4,,10 > .p2align 3 > 
> Which corresponds to restored with -fno-delete-null-pointer-check > initial "p" check on the first iteration of the second loop, due > to "pool->large" being used in the first loop initialization: > > for (l = pool->large; l; l = l->next) { > if (l->alloc) { > ngx_free(l->alloc); > } > } > > for (p = pool; p; p = p->d.next) { > p->d.last = (u_char *) p + sizeof(ngx_pool_t); > p->d.failed = 0; > } > > Many other cases, such as multiple changes in ngx_inet.o, one of > the two changes in ngx_open_file_cache.o, and one change > ngx_http_parse.o, correspond to ngx_strlchr() calls, where > subsequent result check is omitted because no-match is directly > handled by the inlined ngx_strlchr() code: > > p = ngx_strlchr(uri->data, last, '?'); > > if (p) { ... } > > And there seems to be no real changes in ngx_http_script.o and > ngx_stream_script.o, just some minor instructions reordering. > > Overall, it might be feasible to review all the differences to > ensure there are no real issues introduced by various compiler > optimizations. > > [...] > > > There is a proposal for C2y to define memcpy(NULL,NULL,0): > > > https://docs.google.com/document/d/1guH_HgibKrX7t9JfKGfWX2UCPyZOTLsnRfR6UleD1F8/edit > > If you feel strongly that memcpy from NULL should be defined, feel > > free to contribute to it :) > > Interesting, thanks. This would be the best solution and will > eliminate the need for any changes by defining the behaviour, as > it should. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Jan 25 10:14:19 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Jan 2024 13:14:19 +0300 Subject: [PATCH] Enforce that CR precede LF in chunk lines In-Reply-To: References: Message-ID: Hello! On Thu, Jan 25, 2024 at 03:21:16AM +0000, Ben Kallus wrote: > The HTTP/1.1 standards allow the recognition of bare LF as a line > terminator in some contexts. > > From RFC 9112 Section 2.2: > > Although the line terminator for the start-line and fields is the sequence CRLF, a recipient MAY recognize a single LF as a line terminator and ignore any preceding CR. > > From RFC 7230 Section 3.5: > > Although the line terminator for the start-line and header fields is > > the sequence CRLF, a recipient MAY recognize a single LF as a line > > terminator and ignore any preceding CR. > > From RFC 2616 Section 19.3: > > The line terminator for message-header fields is the sequence CRLF. > > However, we recommend that applications, when parsing such headers, > > recognize a single LF as a line terminator and ignore the leading CR. > > In summary, bare LF can be recognized as a line terminator for a > field-line (i.e. a header or trailer) or a start-line, but not outside > of these contexts. In particular, bare LF is not an acceptable line > terminator for chunk data lines or chunk size lines. One of the > rejection messages for an RFC 9112 errata report makes the reasoning > behind this choice clear: > > > The difference was intentional. A chunked parser is not a start line or field parser (it is a message body parser) and it is supposed to be less forgiving because it does not have to retain backwards compatibility with 1.0 parsers. > > Hence, bare LF around the chunk sizes would be invalid and should result in the connection being marked as invalid. 
Still, there is a robustness principle which allows applications to parse requests with various deviations from the grammar. Quoting RFC 2616: Although this document specifies the requirements for the generation of HTTP/1.1 messages, not all applications will be correct in their implementation. We therefore recommend that operational applications be tolerant of deviations whenever those deviations can be interpreted unambiguously. As such, it is certainly valid for a HTTP/1.1 server based on RFC 2616 to accept LF as line terminator in chunk sizes. While RFC 7230 and RFC 9112 tried to harden requirements, and now say that "the server SHOULD respond with 400" on deviations which are not explicitly allowed, it is still not a violation to accept such deviations. > Currently, Nginx allows chunk lines (both size and data) to use bare > LF as a line terminator. This means that (for example) the following > payload is erroneously accepted: > ``` > POST / HTTP/1.1\r\n > Host: whatever\r\n > Transfer-Encoding: chunked\r\n > 0\n # <--- This is > missing a \r > \r\n > ``` > > This is probably not such a big deal, but it is a standards violation, > and it comes with one small consequence: chunk lengths that are off by > one will not invalidate the message body. > > I've developed a few request smuggling exploits against other servers > and proxies in the past that rely upon the attacker's ability to > predict the length of a request after it has passed through a reverse > proxy. This is usually straightforward, but if there are any > unpredictable headers inserted by the proxy, getting the guess right > becomes nontrivial. Being able to be off by one thus makes the > attacker's job a little bit easier. You may want to be more specific what "off by one" means here. While it is true that an attacker which isn't able to generate proper CRLF for some reason might be stopped by such a restriction, it still needs to ensure there is an LF, which makes things mostly equivalent in almost all cases. > Given that many popular HTTP implementations (Apache httpd, Node, > Boost::Beast, Lighttpd) adhere to the standard on this line > termination issue, we should expect this change to break almost no > clients, since any client generating requests that terminate chunk > lines with bare LF would already be incompatible with a large portion > of the web. > > It is, however, also true that many HTTP implementations (Go net/http, > H2O, LiteSpeed) exhibit the same behavior as Nginx, and it's probably > worth exploring why that is. > > The following patch changes Nginx's parsing behavior to match the > standard. Note that this patch does not stop Nginx from allowing bare > LF in request lines, response lines, headers, or trailers. It stops > Nginx from accepting bare LF only in chunk size and data lines, where > the standard does not permit LF/CRLF permissiveness. It's also a > delete-only patch, which is always nice :) > > If you all are open to this change, it will also be necessary to fix > up the many LF-delimited chunks that are present within the test > suite. No, thanks. There are little-to-no benefits from such a change, but the change will needlessly complicate testing, including manual testing such as with nc(1). 
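For concreteness, here are the two framings under discussion written out as C string literals (illustrative test inputs only, not code from nginx or its test suite): nginx currently accepts both, while a strict parser accepts only the first.

```c
/* Chunked body with the CRLF line terminators the grammar requires. */
static const char strict_framing[] =
    "POST / HTTP/1.1\r\n"
    "Host: example.com\r\n"
    "Transfer-Encoding: chunked\r\n"
    "\r\n"
    "3\r\n"
    "foo\r\n"
    "0\r\n"
    "\r\n";

/* Same body with bare LF after the chunk-size and chunk-data lines;
 * tolerated by nginx, rejected by e.g. Apache httpd.
 */
static const char bare_lf_framing[] =
    "POST / HTTP/1.1\r\n"
    "Host: example.com\r\n"
    "Transfer-Encoding: chunked\r\n"
    "\r\n"
    "3\n"
    "foo\n"
    "0\n"
    "\r\n";
```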
-- Maxim Dounin http://mdounin.ru/ From Austin.Mayerhofer at forcepoint.com Thu Jan 25 18:24:05 2024 From: Austin.Mayerhofer at forcepoint.com (Mayerhofer, Austin) Date: Thu, 25 Jan 2024 18:24:05 +0000 Subject: [EXTERNAL] Re: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely In-Reply-To: References: Message-ID: Hey Sergey, Thanks for the help. I tried installing perl via homebrew but I ran into some dependency issues setting it up, and by the time I did set it up, it was skipping due to “# SKIP no http_ssl available”. Is there a set of instructions or documentation for setting up a Mac environment for nginx-tests? I might be setting up perl wrong. From: Sergey Kandaurov Date: Wednesday, January 24, 2024 at 2:59 PM To: nginx-devel at nginx.org Cc: Mayerhofer, Austin Subject: [EXTERNAL] Re: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely > On 25 Jan 2024, at 01:15, Mayerhofer, Austin via nginx-devel wrote: > > Hi all, > Apologies if I sent this twice, I don’t think the first one went through because I wasn’t subscribed to the list. > nginx-tests’ stream_ssl_conf_command.t is hanging for me and not running to completion, I’m using the following configuration: > OS: MacOS 12.6.3 > Chip: Apple M1 Max > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module --with-http_stub_status_module --with-http_v2_module --without-http_auth_basic_module --without-http_autoindex_module --without-http_browser_module --without-http-cache --without-http_charset_module --without-http_empty_gif_module --without-http_fastcgi_module --without-http_grpc_module --without-http_limit_conn_module --without-http_limit_req_module --without-http_memcached_module --without-http_referer_module --without-http_scgi_module --without-http_split_clients_module --without-http_ssi_module --without-http_upstream_hash_module --without-http_upstream_ip_hash_module --without-http_upstream_least_conn_module --without-http_userid_module --without-http_uwsgi_module --with-stream --with-stream_ssl_module --with-stream_ssl_preread_module --without-stream_limit_conn_module --without-stream_set_module --without-stream_split_clients_module --without-stream_upstream_hash_module --without-stream_upstream_least_conn_module --without-stream_upstream_zone_module nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > When I run: > ``` > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v stream_ssl_conf_command.t > ``` > The output is: > ``` > stream_ssl_conf_command.t .. > 1..5 > ok 1 - Certificate > ok 2 - SessionTicket > ok 3 – ServerPreference > ``` > And it hangs there. Perl from macOS base system (/usr/bin/perl) is known to be buggy for some unknown reason. This is expressed in various hangs when running nginx-tests. I saw similar reports since at least macOS 12.3.1, and this is not caused by a particular Perl version (same version from macports works for me). You can try Perl from homebrew or macports collections, it should work just fine. -- Sergey Kandaurov This message has been scanned for malware by Forcepoint. www.forcepoint.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From Austin.Mayerhofer at forcepoint.com Thu Jan 25 18:59:36 2024 From: Austin.Mayerhofer at forcepoint.com (Mayerhofer, Austin) Date: Thu, 25 Jan 2024 18:59:36 +0000 Subject: nginx-tests SSL tests failing out of the box? Message-ID: Hi all, I have not made any changes to NGINX. 
Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? OS: MacOS 12.6.3 Chip: Apple M1 Max NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) Perl: 5.30.3 (/usr/bin/perl) When I run ``` TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t ``` I see ``` not ok 2 - session reused # Failed test 'session reused' # at ssl.t line 187. # 'HTTP/1.1 200 OK # Server: nginx/1.24.0 # Date: Thu, 25 Jan 2024 18:50:10 GMT # Content-Type: text/plain # Content-Length: 6 # Connection: close # # body .' # doesn't match '(?^m:^body r$)' ``` When I run ``` TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl_certificate.t ``` I see ``` not ok 9 - session id context match # Failed test 'session id context match' # at ssl_certificate.t line 183. # 'HTTP/1.1 200 OK # Server: nginx/1.24.0 # Date: Thu, 25 Jan 2024 18:52:11 GMT # Content-Type: text/html # Content-Length: 0 # Last-Modified: Thu, 25 Jan 2024 18:52:11 GMT # Connection: close # ETag: "65b2addb-0" # X-SSL: default:. # X-SSL-Protocol: TLSv1.3 # Accept-Ranges: bytes # # ' # doesn't match '(?^:default:r)' ``` And finally running ``` TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl_crl.t ``` Yields ``` not ok 1 - crl - no revoked certs # Failed test 'crl - no revoked certs' # at ssl_crl.t line 157. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Thu, 25 Jan 2024 18:53:50 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: FAILED:unsupported certificate purpose # # # 400 The SSL certificate error # #

400 Bad Request

#
The SSL certificate error
#
nginx/1.24.0
# # # ' # doesn't match '(?^:SUCCESS)' ``` Thanks, Austin This message has been scanned for malware by Forcepoint. www.forcepoint.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From amdeich at gmail.com Thu Jan 25 19:52:58 2024 From: amdeich at gmail.com (Andrey Kulikov) Date: Thu, 25 Jan 2024 22:52:58 +0300 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: Message-ID: Hello, Don't think your issue is specific to OpenSSL 3.2.0 or ARM64 arch. If you specify just --with-http_ssl_module flag, nginx will be compiled with system OpenSSL. What might be not what you expect (OpenSSL: 3.2.0) on MacOS. Try to specify --with-openssl= on nginx configure stage. Like --with-openssl=./../openssl-3.2.0/ for example. On Thu, Jan 25, 2024 at 10:00 PM Mayerhofer, Austin via nginx-devel < nginx-devel at nginx.org> wrote: > Hi all, > > > > I have not made any changes to NGINX. Vanilla NGINX (./configure with no > flags) passes all tests that run, but when compiling with SSL, not all SSL > tests are passing. Is this expected, or do I need to configure nginx > further aside from adding the --with-http_ssl_module flag? Do each of the > failing tests below require separate fixes, or is there a one-size-fits-all > solution for all of them? > > > > OS: MacOS 12.6.3 > > Chip: Apple M1 Max > > NGINX: 1.24.0 built from source code with ./configure --with-debug > --with-http_ssl_module > > Nginx-tests: > https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > > OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > > Perl: 5.30.3 (/usr/bin/perl) > > > > When I run > > > > ``` > > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > > ``` > > > > I see > > > > ``` > > not ok 2 - session reused > > > > # Failed test 'session reused' > > # at ssl.t line 187. > > # 'HTTP/1.1 200 OK > > # Server: nginx/1.24.0 > > # Date: Thu, 25 Jan 2024 18:50:10 GMT > > # Content-Type: text/plain > > # Content-Length: 6 > > # Connection: close > > # > > # body .' > > # doesn't match '(?^m:^body r$)' > > ``` > > > > > > Thanks, > > Austin > > > This message has been scanned for malware by Forcepoint. > www.forcepoint.com > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From benjamin.p.kallus.gr at dartmouth.edu Thu Jan 25 20:32:17 2024 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Thu, 25 Jan 2024 20:32:17 +0000 Subject: [PATCH] Enforce that CR precede LF in chunk lines In-Reply-To: References: Message-ID: > Still, there is a robustness principle which allows applications > to parse requests with various deviations from the grammar. Whether this is principle is good is a matter of opinion. I tend to lean toward thinking that it is not (as you can probably tell) but reasonable minds will differ on this point. > You may want to be more specific what "off by one" means here. Happy to :) Here's an example of a payload that smuggles a request past Apache Traffic Server to a Node.js backend: ``` POST / HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 2\r\r ;a\r\n 02\r\n 2d\r\n 0\r\n \r\n DELETE / HTTP/1.1\r\n Content-Length: 183\r\n \r\n 0\r\n\r\nGET / HTTP/1.1\r\n\r\n ``` The exact mechanism of this payload is relatively unimportant (it has to do with the `2\r\r;a`). 
The important point is that the POST is seen by both ATS and Node, the DELETE is seen only by Node, and the GET is seen only by ATS. Thus, the DELETE request is smuggled. (A very similar attack worked on Google Cloud's classic application load balancer, and on Akamai's load balancer until very recently when companies patched the bugs. I'm still working on the writeup for those bugs, but you can see us present the material here: https://yewtu.be/watch?v=aKPAX00ft5s&t=2h19m0s) You'll notice that the DELETE request has a Content-Length header. This is because in order for the smuggling to go undetected, the response to the DELETE request needs to be sent only after the GET request is forwarded. One way to do this is to add a message body to the DELETE request, so that it remains incomplete until the arrival of the GET request. It is therefore necessary for the attacker to predict the length of the GET request after it has passed through the reverse proxy, so that this length can be used to compute the Content-Length (or chunk size) in the DELETE request. Because reverse proxies often modify requests, this is not always straightforward. In this instance, I use a Content-Length header of 183 because with the configuration of ATS that I was attacking, `GET / HTTP/1.1\r\n\r\n` ends up becoming 178 bytes long due to the insertion of X-Forwarded-For, Via, etc., +5 for `0\r\n\r\n`. If I had used a length less than 183, then Node would send a 400 after responding to the DELETE request, which makes the reverse proxy aware that request smuggling has occurred. If I had used a length greater than 183, then Node would time out waiting for the rest of the DELETE request's message body. Thus, I need to guess the length exactly right to pull off undetected request smuggling. Guessing correctly can be challenging, especially when added headers have unpredictable lengths. This is common with CDNs, which often insert random identifiers into request headers. If instead of using Content-Length, I had used a chunked message body to smuggle the DELETE request, and the backend server allows bare LF as a chunk line terminator, then my length guess could be one less than the correct value without invalidating the message for servers that accept bare LF in chunk lines. Thus, when developing future request smuggling attacks, getting my length guess correct is a little easier when the backend server allows bare LF chunk line endings. > including manual testing such > as with nc(1). If you pass the -C flag, nc will translate LF to CRLF for you :) From mdounin at mdounin.ru Thu Jan 25 20:38:57 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Jan 2024 23:38:57 +0300 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: Message-ID: Hello! On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote: > Hi all, > > I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? 
> > OS: MacOS 12.6.3 > Chip: Apple M1 Max > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module > Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > Perl: 5.30.3 (/usr/bin/perl) > > When I run > > ``` > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > ``` > > I see > > ``` > not ok 2 - session reused > > # Failed test 'session reused' > # at ssl.t line 187. > # 'HTTP/1.1 200 OK > # Server: nginx/1.24.0 > # Date: Thu, 25 Jan 2024 18:50:10 GMT > # Content-Type: text/plain > # Content-Length: 6 > # Connection: close > # > # body .' > # doesn't match '(?^m:^body r$)' > ``` [...] It looks like SSL session reuse is broken in Perl you are using. This might be the case if, for example, Net::SSLeay in your installation was compiled with system LibreSSL as an SSL library - at least on the server side LibreSSL simply does not support session reuse with TLSv1.3. Test suite checks if nginx was compiled with LibreSSL and marks appropriate tests as TODO, but if the Perl module is broken instead, the test will fail. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Thu Jan 25 23:01:32 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Thu, 25 Jan 2024 23:01:32 +0000 Subject: [njs] Fixed fuzzer build after 9b3dac56fd8a. Message-ID: details: https://hg.nginx.org/njs/rev/6770c015efdc branches: changeset: 2273:6770c015efdc user: Dmitry Volyntsev date: Thu Jan 25 14:48:05 2024 -0800 description: Fixed fuzzer build after 9b3dac56fd8a. diffstat: external/njs_shell.c | 60 +++++++++++++++++++++++---------------------------- 1 files changed, 27 insertions(+), 33 deletions(-) diffs (120 lines): diff -r 9b3dac56fd8a -r 6770c015efdc external/njs_shell.c --- a/external/njs_shell.c Tue Jan 23 16:34:10 2024 -0800 +++ b/external/njs_shell.c Thu Jan 25 14:48:05 2024 -0800 @@ -137,8 +137,7 @@ static njs_int_t njs_process_script(njs_ #ifndef NJS_FUZZER_TARGET static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char **argv); -static njs_int_t njs_options_add_path(njs_opts_t *opts, u_char *path, - size_t len); +static njs_int_t njs_options_add_path(njs_opts_t *opts, char *path, size_t len); static void njs_options_free(njs_opts_t *opts); #ifdef NJS_HAVE_READLINE @@ -400,7 +399,8 @@ done: static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char **argv) { - char *p; + char *p, *start; + size_t len; njs_int_t i, ret; njs_uint_t n; @@ -447,6 +447,27 @@ njs_options_parse(njs_opts_t *opts, int opts->exit_code = atoi(p); } + start = getenv("NJS_PATH"); + if (start != NULL) { + for ( ;; ) { + p = (char *) njs_strchr(start, ':'); + + len = (p != NULL) ? 
(size_t) (p - start) : njs_strlen(start); + + ret = njs_options_add_path(opts, start, len); + if (ret != NJS_OK) { + njs_stderror("failed to add path\n"); + return NJS_ERROR; + } + + if (p == NULL) { + break; + } + + start = p + 1; + } + } + for (i = 1; i < argc; i++) { p = argv[i]; @@ -526,8 +547,7 @@ njs_options_parse(njs_opts_t *opts, int case 'p': if (++i < argc) { - ret = njs_options_add_path(opts, (u_char *) argv[i], - njs_strlen(argv[i])); + ret = njs_options_add_path(opts, argv[i], njs_strlen(argv[i])); if (ret != NJS_OK) { njs_stderror("failed to add path\n"); return NJS_ERROR; @@ -604,7 +624,7 @@ done: static njs_int_t -njs_options_add_path(njs_opts_t *opts, u_char *path, size_t len) +njs_options_add_path(njs_opts_t *opts, char *path, size_t len) { njs_str_t *paths; @@ -617,7 +637,7 @@ njs_options_add_path(njs_opts_t *opts, u } opts->paths = paths; - opts->paths[opts->n_paths - 1].start = path; + opts->paths[opts->n_paths - 1].start = (u_char *) path; opts->paths[opts->n_paths - 1].length = len; return NJS_OK; @@ -1004,10 +1024,7 @@ njs_module_loader(njs_vm_t *vm, njs_exte static njs_vm_t * njs_create_vm(njs_opts_t *opts) { - size_t len; - u_char *p, *start; njs_vm_t *vm; - njs_int_t ret; njs_vm_opt_t vm_options; njs_vm_opt_init(&vm_options); @@ -1053,29 +1070,6 @@ njs_create_vm(njs_opts_t *opts) njs_vm_set_module_loader(vm, njs_module_loader, opts); - start = (u_char *) getenv("NJS_PATH"); - if (start == NULL) { - return vm; - } - - for ( ;; ) { - p = njs_strchr(start, ':'); - - len = (p != NULL) ? (size_t) (p - start) : njs_strlen(start); - - ret = njs_options_add_path(opts, start, len); - if (ret != NJS_OK) { - njs_stderror("failed to add path\n"); - return NULL; - } - - if (p == NULL) { - break; - } - - start = p + 1; - } - return vm; } From Austin.Mayerhofer at forcepoint.com Thu Jan 25 23:20:22 2024 From: Austin.Mayerhofer at forcepoint.com (Mayerhofer, Austin) Date: Thu, 25 Jan 2024 23:20:22 +0000 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: Message-ID: Hey Andrey, Thanks for the help. I rebuilt NGINX with OpenSSL 3.0.8 sources, but the same tests still fail. Here is the output of nginx configuration: nginx version: nginx/1.24.0 built by clang 14.0.0 (clang-1400.0.29.202) built with OpenSSL 3.0.8 7 Feb 2023 TLS SNI support enabled configure arguments: --with-debug --with-http_ssl_module --with-openssl= Are these SSL tests supposed to be failing with these configure arguments? From: Andrey Kulikov Date: Thursday, January 25, 2024 at 11:53 AM To: nginx-devel at nginx.org Cc: Mayerhofer, Austin Subject: [EXTERNAL] Re: nginx-tests SSL tests failing out of the box? Hello, Don't think your issue is specific to OpenSSL 3.2.0 or ARM64 arch. If you specify just --with-http_ssl_module flag, nginx will be compiled with system OpenSSL. What might be not what you expect (OpenSSL: 3.2.0) on MacOS. Try to specify --with-openssl= on nginx configure stage. Like --with-openssl=./../openssl-3.2.0/ for example. On Thu, Jan 25, 2024 at 10:00 PM Mayerhofer, Austin via nginx-devel > wrote: Hi all, I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? 
OS: MacOS 12.6.3
Chip: Apple M1 Max
NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module
Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6
OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl)
Perl: 5.30.3 (/usr/bin/perl)

When I run

```
TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t
```

I see

```
not ok 2 - session reused

# Failed test 'session reused'
# at ssl.t line 187.
# 'HTTP/1.1 200 OK
# Server: nginx/1.24.0
# Date: Thu, 25 Jan 2024 18:50:10 GMT
# Content-Type: text/plain
# Content-Length: 6
# Connection: close
#
# body .'
# doesn't match '(?^m:^body r$)'
```

Thanks,
Austin

From mdounin at mdounin.ru Thu Jan 25 23:32:15 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Fri, 26 Jan 2024 02:32:15 +0300
Subject: [PATCH] Enforce that CR precede LF in chunk lines
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Thu, Jan 25, 2024 at 08:32:17PM +0000, Ben Kallus wrote:

> > Still, there is a robustness principle which allows applications
> > to parse requests with various deviations from the grammar.
>
> Whether this principle is good is a matter of opinion. I tend to
> lean toward thinking that it is not (as you can probably tell) but
> reasonable minds will differ on this point.
>
> > You may want to be more specific what "off by one" means here.
>
> Happy to :)
>
> Here's an example of a payload that smuggles a request past Apache
> Traffic Server to a Node.js backend:
> ```
> POST / HTTP/1.1\r\n
> Transfer-Encoding: chunked\r\n
> \r\n
> 2\r\r
> ;a\r\n
> 02\r\n
> 2d\r\n
> 0\r\n
> \r\n
> DELETE / HTTP/1.1\r\n
> Content-Length: 183\r\n
> \r\n
> 0\r\n\r\nGET / HTTP/1.1\r\n\r\n
> ```
> The exact mechanism of this payload is relatively unimportant (it has
> to do with the `2\r\r;a`). The important point is that the POST is
> seen by both ATS and Node, the DELETE is seen only by Node, and the
> GET is seen only by ATS. Thus, the DELETE request is smuggled.
>
> (A very similar attack worked on Google Cloud's classic application
> load balancer, and on Akamai's load balancer until very recently when
> companies patched the bugs. I'm still working on the writeup for those
> bugs, but you can see us present the material here:
> https://yewtu.be/watch?v=aKPAX00ft5s&t=2h19m0s)
>
> You'll notice that the DELETE request has a Content-Length header.
> This is because in order for the smuggling to go undetected, the
> response to the DELETE request needs to be sent only after the GET
> request is forwarded. One way to do this is to add a message body to
> the DELETE request, so that it remains incomplete until the arrival of
> the GET request. It is therefore necessary for the attacker to predict
> the length of the GET request after it has passed through the reverse
> proxy, so that this length can be used to compute the Content-Length
> (or chunk size) in the DELETE request. Because reverse proxies often
> modify requests, this is not always straightforward.
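To put numbers on the guessing problem before the worked example in the next paragraph, here is a minimal sketch of the computation; the 178-byte figure is specific to the ATS configuration discussed in this thread, and the variable names are invented for illustration:

```c
#include <stdio.h>

int main(void)
{
    /* size of "GET / HTTP/1.1\r\n\r\n" after the reverse proxy has
     * inserted X-Forwarded-For, Via, etc. (specific to the ATS
     * configuration discussed in this thread) */
    size_t rewritten_get = 178;

    /* the "0\r\n\r\n" that terminates the outer chunked message body */
    size_t final_chunk = 5;

    /* the smuggled DELETE request must declare a body that consumes
     * exactly these bytes; one byte less or more is detectable */
    printf("Content-Length: %zu\n", rewritten_get + final_chunk);  /* 183 */

    return 0;
}
```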
>
> In this instance, I use a Content-Length header of 183 because with
> the configuration of ATS that I was attacking, `GET /
> HTTP/1.1\r\n\r\n` ends up becoming 178 bytes long due to the insertion
> of X-Forwarded-For, Via, etc., +5 for `0\r\n\r\n`. If I had used a
> length less than 183, then Node would send a 400 after responding to
> the DELETE request, which makes the reverse proxy aware that request
> smuggling has occurred. If I had used a length greater than 183, then
> Node would time out waiting for the rest of the DELETE request's
> message body. Thus, I need to guess the length exactly right to pull
> off undetected request smuggling. Guessing correctly can be
> challenging, especially when added headers have unpredictable lengths.
> This is common with CDNs, which often insert random identifiers into
> request headers.
>
> If instead of using Content-Length, I had used a chunked message body
> to smuggle the DELETE request, and the backend server allows bare LF
> as a chunk line terminator, then my length guess could be one less
> than the correct value without invalidating the message for servers
> that accept bare LF in chunk lines. Thus, when developing future
> request smuggling attacks, getting my length guess correct is a little
> easier when the backend server allows bare LF chunk line endings.

As far as I understand what goes on here and what you mean by using
a chunked message body: with a length guess which is one less than
the correct value, you'll end up with LF + "\r\n0\r\n\r\n" at the end
of the request, which will result in a 400. A length which is one
more will work though, so understood, thanks.

In the original exploit, a length which is two less should work
though, due to the "SHOULD ignore at least one empty line (CRLF)
received prior to the request-line" robustness exception in section
2.2 (Message Parsing) of RFC 9112. And one less might also work, as
long as empty lines before the request-line accept bare LF (not sure
about Node though).

Overall, I don't think there is a big difference here.

> > including manual testing such
> > as with nc(1).
>
> If you pass the -C flag, nc will translate LF to CRLF for you :)

It won't, because "-C" is a non-portable flag provided by a
Debian-specific patch. And even if it works for some, this will
still complicate testing.

-- 
Maxim Dounin
http://mdounin.ru/

From Austin.Mayerhofer at forcepoint.com Fri Jan 26 01:14:56 2024
From: Austin.Mayerhofer at forcepoint.com (Mayerhofer, Austin)
Date: Fri, 26 Jan 2024 01:14:56 +0000
Subject: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely
In-Reply-To: 
References: 
Message-ID: 

Hey Maxim,

Thanks, I installed homebrew’s Perl and all these tests are passing
now, woohoo! However, a few others are failing now, including
ssl_ocsp.t and ssl_verify_depth.t, failing 13/17 and 3/11 tests
respectively, with the same error:

```
# Failed test 'verify depth 2 - end'
# at ssl_verify_depth.t line 169.
# 'HTTP/1.1 400 Bad Request
# Server: nginx/1.24.0
# Date: Fri, 26 Jan 2024 01:08:10 GMT
# Content-Type: text/html
# Content-Length: 215
# Connection: close
# X-Client: CN=end
# X-Verify: FAILED:unsuitable certificate purpose
#
# <html>
# <head><title>400 The SSL certificate error</title></head>
# <body>
# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# </body>
# </html>
# '
# doesn't match '(?^:SUCCESS)'
```

Originally the SSL tests were being skipped due to the absence of
“socket_ssl”, so I had to manually install IO::Socket::SSL using cpan:

```
"Module" IO::Socket::SSL
* "installed into: /opt/homebrew/Cellar/perl/5.38.2_1/lib/perl5/site_perl/5.38"
* "LINKTYPE: dynamic"
* "VERSION: 2.085"
* "EXE_FILES: "
```

Could the error above be another perl issue?

From: Mayerhofer, Austin
Date: Thursday, January 25, 2024 at 10:24 AM
To: Sergey Kandaurov , nginx-devel at nginx.org
Subject: Re: [EXTERNAL] Re: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely

Hey Sergey,

Thanks for the help. I tried installing perl via homebrew, but I ran
into some dependency issues setting it up, and by the time I did set
it up, it was skipping due to “# SKIP no http_ssl available”. Is there
a set of instructions or documentation for setting up a Mac
environment for nginx-tests? I might be setting up perl wrong.

From: Sergey Kandaurov
Date: Wednesday, January 24, 2024 at 2:59 PM
To: nginx-devel at nginx.org
Cc: Mayerhofer, Austin
Subject: [EXTERNAL] Re: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely

> On 25 Jan 2024, at 01:15, Mayerhofer, Austin via nginx-devel wrote:
>
> Hi all,
> Apologies if I sent this twice, I don’t think the first one went
> through because I wasn’t subscribed to the list.
> nginx-tests’ stream_ssl_conf_command.t is hanging for me and not
> running to completion, I’m using the following configuration:
> OS: MacOS 12.6.3
> Chip: Apple M1 Max
> NGINX: 1.24.0 built from source code with ./configure --with-debug
> --with-http_ssl_module --with-http_stub_status_module
> --with-http_v2_module --without-http_auth_basic_module
> --without-http_autoindex_module --without-http_browser_module
> --without-http-cache --without-http_charset_module
> --without-http_empty_gif_module --without-http_fastcgi_module
> --without-http_grpc_module --without-http_limit_conn_module
> --without-http_limit_req_module --without-http_memcached_module
> --without-http_referer_module --without-http_scgi_module
> --without-http_split_clients_module --without-http_ssi_module
> --without-http_upstream_hash_module
> --without-http_upstream_ip_hash_module
> --without-http_upstream_least_conn_module
> --without-http_userid_module --without-http_uwsgi_module
> --with-stream --with-stream_ssl_module
> --with-stream_ssl_preread_module --without-stream_limit_conn_module
> --without-stream_set_module --without-stream_split_clients_module
> --without-stream_upstream_hash_module
> --without-stream_upstream_least_conn_module
> --without-stream_upstream_zone_module
> nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6
> When I run:
> ```
> TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v stream_ssl_conf_command.t
> ```
> The output is:
> ```
> stream_ssl_conf_command.t ..
> 1..5
> ok 1 - Certificate
> ok 2 - SessionTicket
> ok 3 - ServerPreference
> ```
> And it hangs there.

Perl from the macOS base system (/usr/bin/perl) is known to be buggy
for some unknown reason. This is expressed in various hangs when
running nginx-tests. I saw similar reports since at least macOS
12.3.1, and this is not caused by a particular Perl version (the same
version from macports works for me). You can try Perl from the
homebrew or macports collections, it should work just fine.

-- 
Sergey Kandaurov

-------------- next part --------------
An HTML attachment was scrubbed...
URL: From arut at nginx.com Fri Jan 26 07:03:31 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 26 Jan 2024 11:03:31 +0400 Subject: [PATCH] Introduced worker_shutdown_idle_delay In-Reply-To: References: Message-ID: <20240126070331.wzis3t7vbsksrgda@N00W24XTQX> Hi Artem, On Wed, Nov 08, 2023 at 07:51:39AM +0300, Artem Pogartsev via nginx-devel wrote: > # HG changeset patch > # User Artem Pogartsev > # Date 1699416491 -10800 > # Wed Nov 08 07:08:11 2023 +0300 > # Node ID eb0dd3d903431f4dd7a62d629db457ecddeadd96 > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > Introduced worker_shutdown_idle_delay. > > The directive configures a delay before closing idle connections to be used > when gracefully shutting down worker processes. When the timer expires, nginx > will attempt to close any remaining idle connections. > > The delay is only added for connections with the "shutdown_delay" flag, and > currently it's only keepalive HTTP/1.1 connections. The behavior is not changed > for protocols that have built-in graceful shutdown mechanisms, such as GOAWAY > frames in HTTP2 and HTTP3. > > Although it's perfectly fine to close an HTTP/1.1 connection at any time > according to RFC 9112, it may still be useful to delay closing a connection, > wait for the next client request, and close the connection with a > "Connection: close" header to avoid unnecessary retries by clients. This is > especially important for environments with frequent nginx reloads and large > amounts of non-idempotent requests which are quite problematic for automatic > retries. > > Should be used carefully to not delay configuration reloads too much (and thus > increase nginx resource usage), and ideally in combination with properly > configured clients: [..] I suggest a simpler patch which disables idle mode for HTTP keepalive connections. Such connections will not be closed until one of the timeouts (keepalive_timeout or worker_shutdown_timeout) expires or when a new request arrives and receives 'Connection: close'. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1706205992 -14400 # Thu Jan 25 22:06:32 2024 +0400 # Node ID 165096c37f91640042e93f115b7281a3c690bc9d # Parent d9bc503a4c39d29480e640347e0b992d83c792bc Added "keepalive_worker_shutdown" directive. The directive allows disabling idle mode for HTTP keepalive connections. Such a connection will not be closed on worker shutdown until a new request is received on that connection or one of keepalive_timeout/worker_shutdown_timeout expires. By default idle mode is enabled, as before. 
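For illustration, the directive introduced by the patch below might be used like this; this is a hypothetical configuration sketch (the listen port and timeout values are invented), not an example taken from the patch:

    http {
        server {
            listen 8080;

            location / {
                # Do not put keepalive connections into idle mode on
                # graceful shutdown: such a connection is closed when
                # its next request is answered with "Connection: close",
                # or when keepalive_timeout / worker_shutdown_timeout
                # expires.
                keepalive_worker_shutdown off;

                keepalive_timeout 75s;
            }
        }
    }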
diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -523,6 +523,13 @@ static ngx_command_t ngx_http_core_comm offsetof(ngx_http_core_loc_conf_t, keepalive_disable), &ngx_http_core_keepalive_disable }, + { ngx_string("keepalive_worker_shutdown"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_core_loc_conf_t, keepalive_shutdown), + NULL }, + { ngx_string("satisfy"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_enum_slot, @@ -3607,6 +3614,7 @@ ngx_http_core_create_loc_conf(ngx_conf_t clcf->keepalive_timeout = NGX_CONF_UNSET_MSEC; clcf->keepalive_header = NGX_CONF_UNSET; clcf->keepalive_requests = NGX_CONF_UNSET_UINT; + clcf->keepalive_shutdown = NGX_CONF_UNSET; clcf->lingering_close = NGX_CONF_UNSET_UINT; clcf->lingering_time = NGX_CONF_UNSET_MSEC; clcf->lingering_timeout = NGX_CONF_UNSET_MSEC; @@ -3897,6 +3905,7 @@ ngx_http_core_merge_loc_conf(ngx_conf_t ngx_conf_merge_value(conf->chunked_transfer_encoding, prev->chunked_transfer_encoding, 1); ngx_conf_merge_value(conf->etag, prev->etag, 1); + ngx_conf_merge_value(conf->keepalive_shutdown, prev->keepalive_shutdown, 1); ngx_conf_merge_uint_value(conf->server_tokens, prev->server_tokens, NGX_HTTP_SERVER_TOKENS_ON); diff --git a/src/http/ngx_http_core_module.h b/src/http/ngx_http_core_module.h --- a/src/http/ngx_http_core_module.h +++ b/src/http/ngx_http_core_module.h @@ -407,6 +407,7 @@ struct ngx_http_core_loc_conf_s { ngx_uint_t server_tokens; /* server_tokens */ ngx_flag_t chunked_transfer_encoding; /* chunked_transfer_encoding */ ngx_flag_t etag; /* etag */ + ngx_flag_t keepalive_shutdown; /* keepalive_worker_shutdown */ #if (NGX_HTTP_GZIP) ngx_flag_t gzip_vary; /* gzip_vary */ diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -3302,7 +3302,10 @@ ngx_http_set_keepalive(ngx_http_request_ r->http_state = NGX_HTTP_KEEPALIVE_STATE; #endif - c->idle = 1; + if (clcf->keepalive_shutdown) { + c->idle = 1; + } + ngx_reusable_connection(c, 1); ngx_add_timer(rev, clcf->keepalive_timeout); From pluknet at nginx.com Fri Jan 26 10:36:29 2024 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 26 Jan 2024 14:36:29 +0400 Subject: [PATCH] SSL: fixed $ssl_curves allocation error handling Message-ID: <2f70dd17c16461f833ea.1706265389@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1706265240 -14400 # Fri Jan 26 14:34:00 2024 +0400 # Node ID 2f70dd17c16461f833eafec2dcf9193557bfb176 # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 SSL: fixed $ssl_curves allocation error handling. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -5187,6 +5187,9 @@ ngx_ssl_get_curves(ngx_connection_t *c, } curves = ngx_palloc(pool, n * sizeof(int)); + if (curves == NULL) { + return NGX_ERROR; + } n = SSL_get1_curves(c->ssl->connection, curves); len = 0; From arut at nginx.com Fri Jan 26 12:02:30 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 26 Jan 2024 16:02:30 +0400 Subject: [PATCH 1 of 4] Fixed request termination with AIO and subrequests (ticket #2555) In-Reply-To: References: Message-ID: <20240126120230.45y2unpnlbzll4ru@N00W24XTQX> Hi, On Mon, Nov 27, 2023 at 05:50:24AM +0300, Maxim Dounin wrote: > # HG changeset patch > # User Maxim Dounin > # Date 1701049682 -10800 > # Mon Nov 27 04:48:02 2023 +0300 > # Node ID a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > Fixed request termination with AIO and subrequests (ticket #2555). > > When a request was terminated due to an error via ngx_http_terminate_request() > while an AIO operation was running in a subrequest, various issues were > observed. This happened because ngx_http_request_finalizer() was only set > in the subrequest where ngx_http_terminate_request() was called, but not > in the subrequest where the AIO operation was running. After completion > of the AIO operation resumed normal processing of the subrequest, leading > to issues. Something is wrong with the last sentence. > In particular, in case of the upstream module, termination of the request > called upstream cleanup, which closed the upstream connection. Attempts to > further work with the upstream connection after AIO operation completion > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > descriptor) while reading upstream" errors, or socket leaks. Can you elaborate on socket leaks? > In ticket #2555, issues were observed with the following configuration > with cache background update (with thread writing instrumented to > introduce a delay, when a client closes the connection during an update): > > location = /background-and-aio-write { > proxy_pass ... > proxy_cache one; > proxy_cache_valid 200 1s; > proxy_cache_background_update on; > proxy_cache_use_stale updating; > aio threads; > aio_write on; > limit_rate 1000; > } > > Similarly, the same issue can be seen with SSI, and can be caused by > errors in subrequests, such as in the following configuration > (were "/proxy" uses AIO, and "/sleep" returns 444 after some delay, s/were/where/ ? > causing request termination): > > location = /ssi-active-boom { > ssi on; > ssi_types *; > return 200 ' > > > '; > limit_rate 1000; > } > > Or the same with both AIO operation and the error in non-active subrequests > (which needs slightly different handling, see below): > > location = /ssi-non-active-boom { > ssi on; > ssi_types *; > return 200 ' > > > > '; > limit_rate 1000; > } > > Similarly, issues can be observed with just static files. However, > with static files potential impact is limited due to timeout safeguards > in ngx_http_writer(), and the fact that c->error is set during request > termination. 
> In a simple configuration with an AIO operation in the active subrequest,
> such as in the following configuration, the connection is closed right
> after completion of the AIO operation anyway, since ngx_http_writer()
> tries to write to the connection and fails due to c->error set:
>
>     location = /ssi-active-static-boom {
>         ssi on;
>         ssi_types *;
>         return 200 '
>
>
>         ';
>         limit_rate 1000;
>     }
>
> In the following configuration, with an AIO operation in a non-active
> subrequest, the connection is closed only after send_timeout expires:
>
>     location = /ssi-non-active-static-boom {
>         ssi on;
>         ssi_types *;
>         return 200 '
>
>
>
>         ';
>         limit_rate 1000;
>     }
>
> Fix is to introduce r->main->terminated flag, which is to be checked
> by AIO event handlers when the r->main->blocked counter is decremented.
> When the flag is set, handlers are expected to wake up the connection
> instead of the subrequest (which might be already cleaned up).
>
> Additionally, now ngx_http_request_finalizer() is always set in the
> active subrequest, so waking up the connection properly finalizes the
> request even if termination happened in a non-active subrequest.

The issue does not seem to be significant for static files. In fact,
the biggest problem is trying to use a resource after it was freed by
an ngx_http_cleanup_add()-registered handler, as opposed to
ngx_pool_cleanup_add() handlers, which are safer, but serve a slightly
different purpose. As for non-ngx_http_cleanup_add() related code
(like static files), the effect of the issue is just a possible delay
of the connection closure until output is produced, in which case
typically ngx_http_write_filter() triggers the closure. So the patch
basically fixes a time delay (usually limited by a timeout). IMO
there's no need to go into so much detail about that.
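As a side note to the distinction drawn above, here is a minimal sketch of how the two cleanup mechanisms are registered; the handler and function names are hypothetical, and the snippet assumes the usual nginx headers:

```c
/* assumes: #include <ngx_config.h>, <ngx_core.h>, <ngx_http.h> */

static void
example_cleanup(void *data)                         /* hypothetical */
{
    /* release a per-request resource, e.g. close a descriptor */
}

static ngx_int_t
example_register(ngx_http_request_t *r, void *ctx)  /* hypothetical */
{
    ngx_http_cleanup_t  *hcln;
    ngx_pool_cleanup_t  *pcln;

    /* runs at request termination, possibly while an AIO operation
     * is still pending; anything it frees must not be used later */
    hcln = ngx_http_cleanup_add(r, 0);
    if (hcln == NULL) {
        return NGX_ERROR;
    }

    hcln->handler = example_cleanup;
    hcln->data = ctx;

    /* runs later, when the request pool itself is destroyed */
    pcln = ngx_pool_cleanup_add(r->pool, 0);
    if (pcln == NULL) {
        return NGX_ERROR;
    }

    pcln->handler = example_cleanup;
    pcln->data = ctx;

    return NGX_OK;
}
```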
> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -195,9 +195,18 @@ ngx_http_copy_aio_event_handler(ngx_even > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > > - ngx_http_run_posted_requests(c); > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > @@ -305,11 +314,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > #endif > > - if (r->done) { > + if (r->done || r->main->terminated) { > /* > * trigger connection event handler if the subrequest was > - * already finalized; this can happen if the handler is used > - * for sendfile() in threads > + * already finalized (this can happen if the handler is used > + * for sendfile() in threads), or if the request was terminated > */ > > c->write->handler(c->write); > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > --- a/src/http/ngx_http_file_cache.c > +++ b/src/http/ngx_http_file_cache.c > @@ -14,7 +14,7 @@ > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > ngx_http_cache_t *c); > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > ngx_http_cache_t *c); > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > ngx_http_cache_t *c); > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > static void > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > { > + ngx_int_t rc; > ngx_connection_t *c; > ngx_http_request_t *r; > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > - ngx_http_file_cache_lock_wait(r, r->cache); > - > - ngx_http_run_posted_requests(c); > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > + > + if (rc == NGX_AGAIN) { > + return; > + } > + > + r->cache->waiting = 0; > + r->main->blocked--; > + > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } BTW, cache lock is not a real aio. It's just a regular event timer. And it's deleted in ngx_http_file_cache_free() which is called from ngx_http_upstream_finalize_request(). So it looks like the "terminated" flag will never be 1 here. > -static void > +static ngx_int_t > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > { > ngx_uint_t wait; > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > "cache lock timeout"); > c->lock_timeout = 0; > - goto wakeup; > + return NGX_OK; > } > > cache = c->file_cache; > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > if (wait) { > ngx_add_timer(&c->wait_event, (timer > 500) ? 
500 : timer); > - return; > + return NGX_AGAIN; > } > > -wakeup: > - > - c->waiting = 0; > - r->main->blocked--; > - r->write_event_handler(r); > + return NGX_OK; > } > > > @@ -740,9 +755,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > - > - ngx_http_run_posted_requests(c); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > @@ -810,9 +834,18 @@ ngx_http_cache_thread_event_handler(ngx_ > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > - > - ngx_http_run_posted_requests(c); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > --- a/src/http/ngx_http_request.c > +++ b/src/http/ngx_http_request.c > @@ -2681,6 +2681,8 @@ ngx_http_terminate_request(ngx_http_requ > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "http terminate request count:%d", mr->count); > > + mr->terminated = 1; Another solution could be skipping the cleanup handlers below if mr->blocked is set. This would fix the crash, but would not fix the delay though. > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > mr->headers_out.status = rc; > } > @@ -2703,8 +2705,13 @@ ngx_http_terminate_request(ngx_http_requ > if (mr->write_event_handler) { > > if (mr->blocked) { > + if (r != r->connection->data) { > + r = r->connection->data; > + } Why not simply r = r->connection->data. Or maybe a new variable ar (active request) similar to mr (main request) would make sense. > + > r->connection->error = 1; > r->write_event_handler = ngx_http_request_finalizer; > + > return; > } > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > --- a/src/http/ngx_http_request.h > +++ b/src/http/ngx_http_request.h > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > unsigned root_tested:1; > unsigned done:1; > unsigned logged:1; > + unsigned terminated:1; > > unsigned buffered:4; > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3984,11 +3984,11 @@ ngx_http_upstream_thread_event_handler(n > > #endif > > - if (r->done) { > + if (r->done || r->main->terminated) { > /* > * trigger connection event handler if the subrequest was > - * already finalized; this can happen if the handler is used > - * for sendfile() in threads > + * already finalized (this can happen if the handler is used > + * for sendfile() in threads), or if the request was terminated > */ > > c->write->handler(c->write); > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel The patch is generally ok. 
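For reference, a minimal sketch of the simplification suggested above for ngx_http_terminate_request(); an illustration, not the code from the patch:

```c
    if (mr->blocked) {
        /* always wake up and finalize the currently active
         * (sub)request; for HTTP connections, c->data points at it */
        r = r->connection->data;

        r->connection->error = 1;
        r->write_event_handler = ngx_http_request_finalizer;

        return;
    }
```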
-- 
Roman Arutyunyan

From pluknet at nginx.com Fri Jan 26 12:26:00 2024
From: pluknet at nginx.com (Sergey Kandaurov)
Date: Fri, 26 Jan 2024 16:26:00 +0400
Subject: [PATCH 2 of 4] Upstream: fixed usage of closed sockets with filter finalization
In-Reply-To: 
References: 
Message-ID: 

> On 27 Nov 2023, at 06:50, Maxim Dounin wrote:
>
> # HG changeset patch
> # User Maxim Dounin
> # Date 1701049758 -10800
> #      Mon Nov 27 04:49:18 2023 +0300
> # Node ID faf0b9defc76b8683af466f8a950c2c241382970
> # Parent a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b
> Upstream: fixed usage of closed sockets with filter finalization.
>
> When filter finalization is triggered when working with an upstream server,
> and error_page redirects request processing to some simple handler,
> ngx_http_request_finalize() triggers request termination when the response
> is sent. In particular, via the upstream cleanup handler, nginx will close
> the upstream connection and the corresponding socket.
>
> Still, this can happen to be with ngx_event_pipe() on stack. While
> the code will set p->downstream_error due to NGX_ERROR returned from the
> output filter chain by filter finalization, otherwise the error will be
> ignored till control returns to ngx_http_upstream_process_request().
> And event pipe might try reading from the (already closed) socket, resulting
> in "readv() failed (9: Bad file descriptor) while reading upstream" errors
> (or even segfaults with SSL).
>
> Such errors were seen with the following configuration:
>
>     location /t2 {
>         proxy_pass http://127.0.0.1:8080/big;
>
>         image_filter_buffer 10m;
>         image_filter resize 150 100;
>         error_page 415 = /empty;
>     }
>
>     location /empty {
>         return 204;
>     }
>
>     location /big {
>         # big enough static file
>     }
>
> Fix is to set p->upstream_error in ngx_http_upstream_finalize_request(),
> so the existing checks in ngx_event_pipe_read_upstream() will prevent
> further reading from the closed upstream connection.
>
> Similarly, p->upstream_error is now checked when handling events at
> ngx_event_pipe() exit, as checking p->upstream->fd is not enough if
> keepalive upstream connections are being used and the connection was
> saved to cache during request termination.
>

Setting p->upstream_error in ngx_http_upstream_finalize_request() may
look suspicious, because it used to be set on connection errors such
as an upstream timeout or a recv error, or, as a recently introduced
exception in the fastcgi module, also when the FastCGI record ends
prematurely, before receiving all the expected content. But
technically I think this is quite correct, because we no longer want
to receive further data, and also (and you mention this in the commit
log) this repeats closing an upstream connection socket in the same
place in ngx_http_upstream_finalize_request(). So I think it should
be fine.

> diff --git a/src/event/ngx_event_pipe.c b/src/event/ngx_event_pipe.c
> --- a/src/event/ngx_event_pipe.c
> +++ b/src/event/ngx_event_pipe.c
> @@ -57,7 +57,9 @@ ngx_event_pipe(ngx_event_pipe_t *p, ngx_
>          do_write = 1;
>      }
>
> -    if (p->upstream->fd != (ngx_socket_t) -1) {
> +    if (p->upstream->fd != (ngx_socket_t) -1
> +        && !p->upstream_error)
> +    {
>          rev = p->upstream->read;
>
>          flags = (rev->eof || rev->error) ?
NGX_CLOSE_EVENT : 0; > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -4561,6 +4561,10 @@ ngx_http_upstream_finalize_request(ngx_h > > u->peer.connection = NULL; > > + if (u->pipe) { > + u->pipe->upstream_error = 1; > + } > + > if (u->pipe && u->pipe->temp_file) { > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "http upstream temp fd: %d", Looks good. -- Sergey Kandaurov From pluknet at nginx.com Fri Jan 26 12:26:21 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 26 Jan 2024 16:26:21 +0400 Subject: [PATCH 3 of 4] Silenced complaints about socket leaks on forced termination In-Reply-To: <61d08e4cf97cc073200e.1701053426@vm-bsd.mdounin.ru> References: <61d08e4cf97cc073200e.1701053426@vm-bsd.mdounin.ru> Message-ID: <65A3D86F-FAE9-405A-990D-F1EA14D64D8E@nginx.com> > On 27 Nov 2023, at 06:50, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1701049787 -10800 > # Mon Nov 27 04:49:47 2023 +0300 > # Node ID 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51 > # Parent faf0b9defc76b8683af466f8a950c2c241382970 > Silenced complaints about socket leaks on forced termination. > > When graceful shutdown was requested, and then nginx was forced to > do fast shutdown, it used to (incorrectly) complain about open sockets > left in connections which weren't yet closed when fast shutdown > was requested. > > Fix is to avoid complaining about open sockets when fast shutdown was > requested after graceful one. Abnormal termination, if requested with > the WINCH signal, can still happen though. I've been wondering about such IMHO odd behaviour and support the fix. There might be an opinion that once you requested graceful shutdown, you have to wait until it's done, but I think that requesting fast shutdown afterwards should be legitimate. > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > --- a/src/os/unix/ngx_process_cycle.c > +++ b/src/os/unix/ngx_process_cycle.c > @@ -948,7 +948,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > } > } > > - if (ngx_exiting) { > + if (ngx_exiting && !ngx_terminate) { > c = cycle->connections; > for (i = 0; i < cycle->connection_n; i++) { > if (c[i].fd != -1 > @@ -963,11 +963,11 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > ngx_debug_quit = 1; > } > } > + } > > - if (ngx_debug_quit) { > - ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); > - ngx_debug_point(); > - } > + if (ngx_debug_quit) { > + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); > + ngx_debug_point(); > } > > /* > diff --git a/src/os/win32/ngx_process_cycle.c b/src/os/win32/ngx_process_cycle.c > --- a/src/os/win32/ngx_process_cycle.c > +++ b/src/os/win32/ngx_process_cycle.c > @@ -834,7 +834,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > } > } > > - if (ngx_exiting) { > + if (ngx_exiting && !ngx_terminate) { > c = cycle->connections; > for (i = 0; i < cycle->connection_n; i++) { > if (c[i].fd != (ngx_socket_t) -1 I think it's fine. 
-- Sergey Kandaurov From pluknet at nginx.com Fri Jan 26 12:27:30 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 26 Jan 2024 16:27:30 +0400 Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162) In-Reply-To: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru> References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru> Message-ID: > On 27 Nov 2023, at 06:50, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1701050170 -10800 > # Mon Nov 27 04:56:10 2023 +0300 > # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2 > # Parent 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51 > AIO operations now add timers (ticket #2162). > > Each AIO (thread IO) operation being run is now accompanied with 1-minute > timer. This timer prevents unexpected shutdown of the worker process while > an AIO operation is running, and logs an alert if the operation is running > for too long. > > This fixes "open socket left" alerts during worker processes shutdown > due to pending AIO (or thread IO) operations while corresponding requests > have no timers. In particular, such errors were observed while reading > cache headers (ticket #2162), and with worker_shutdown_timeout. > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -170,6 +170,8 @@ ngx_http_copy_aio_handler(ngx_output_cha > file->aio->data = r; > file->aio->handler = ngx_http_copy_aio_event_handler; > > + ngx_add_timer(&file->aio->event, 60000); > + > r->main->blocked++; > r->aio = 1; > ctx->aio = 1; > @@ -192,6 +194,17 @@ ngx_http_copy_aio_event_handler(ngx_even > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http aio: \"%V?%V\"", &r->uri, &r->args); > > + if (ev->timedout) { > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > + "aio operation took too long"); > + ev->timedout = 0; > + return; > + } > + > + if (ev->timer_set) { > + ngx_del_timer(ev); > + } > + > r->main->blocked--; > r->aio = 0; > > @@ -273,6 +286,8 @@ ngx_http_copy_thread_handler(ngx_thread_ > return NGX_ERROR; > } > > + ngx_add_timer(&task->event, 60000); > + > r->main->blocked++; > r->aio = 1; > > @@ -297,6 +312,17 @@ ngx_http_copy_thread_event_handler(ngx_e > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http thread: \"%V?%V\"", &r->uri, &r->args); > > + if (ev->timedout) { > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > + "thread operation took too long"); > + ev->timedout = 0; > + return; > + } > + > + if (ev->timer_set) { > + ngx_del_timer(ev); > + } > + > r->main->blocked--; > r->aio = 0; > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > --- a/src/http/ngx_http_file_cache.c > +++ b/src/http/ngx_http_file_cache.c > @@ -705,6 +705,8 @@ ngx_http_file_cache_aio_read(ngx_http_re > c->file.aio->data = r; > c->file.aio->handler = ngx_http_cache_aio_event_handler; > > + ngx_add_timer(&c->file.aio->event, 60000); > + > r->main->blocked++; > r->aio = 1; > > @@ -752,6 +754,17 @@ ngx_http_cache_aio_event_handler(ngx_eve > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http file cache aio: \"%V?%V\"", &r->uri, &r->args); > > + if (ev->timedout) { > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > + "aio operation took too long"); > + ev->timedout = 0; > + return; > + } > + > + if (ev->timer_set) { > + ngx_del_timer(ev); > + } > + > r->main->blocked--; > r->aio = 0; > > @@ -810,6 +823,8 @@ ngx_http_cache_thread_handler(ngx_thread > return NGX_ERROR; > } > > + 
ngx_add_timer(&task->event, 60000); > + > r->main->blocked++; > r->aio = 1; > > @@ -831,6 +846,17 @@ ngx_http_cache_thread_event_handler(ngx_ > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http file cache thread: \"%V?%V\"", &r->uri, &r->args); > > + if (ev->timedout) { > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > + "thread operation took too long"); > + ev->timedout = 0; > + return; > + } > + > + if (ev->timer_set) { > + ngx_del_timer(ev); > + } > + > r->main->blocked--; > r->aio = 0; > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3949,6 +3949,8 @@ ngx_http_upstream_thread_handler(ngx_thr > r->aio = 1; > p->aio = 1; > > + ngx_add_timer(&task->event, 60000); > + > return NGX_OK; > } > > @@ -3967,6 +3969,17 @@ ngx_http_upstream_thread_event_handler(n > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http upstream thread: \"%V?%V\"", &r->uri, &r->args); > > + if (ev->timedout) { > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > + "thread operation took too long"); > + ev->timedout = 0; > + return; > + } > + > + if (ev->timer_set) { > + ngx_del_timer(ev); > + } > + > r->main->blocked--; > r->aio = 0; > > diff --git a/src/os/unix/ngx_files.c b/src/os/unix/ngx_files.c > --- a/src/os/unix/ngx_files.c > +++ b/src/os/unix/ngx_files.c > @@ -110,6 +110,8 @@ ngx_thread_read(ngx_file_t *file, u_char > return NGX_ERROR; > } > > + task->event.log = file->log; > + > file->thread_task = task; > } > > @@ -493,6 +495,8 @@ ngx_thread_write_chain_to_file(ngx_file_ > return NGX_ERROR; > } > > + task->event.log = file->log; > + > file->thread_task = task; > } > > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > --- a/src/os/unix/ngx_linux_sendfile_chain.c > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > @@ -332,6 +332,7 @@ ngx_linux_sendfile_thread(ngx_connection > return NGX_ERROR; > } > > + task->event.log = c->log; > task->handler = ngx_linux_sendfile_thread_handler; > > c->sendfile_task = task; Looks good to me. -- Sergey Kandaurov From pluknet at nginx.com Fri Jan 26 17:29:58 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 26 Jan 2024 21:29:58 +0400 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: Message-ID: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> On Thu, Jan 25, 2024 at 11:38:57PM +0300, Maxim Dounin wrote: > Hello! > > On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote: > > > Hi all, > > > > I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? > > > > OS: MacOS 12.6.3 > > Chip: Apple M1 Max > > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module > > Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > > OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > > Perl: 5.30.3 (/usr/bin/perl) > > > > When I run > > > > ``` > > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > > ``` > > > > I see > > > > ``` > > not ok 2 - session reused > > > > # Failed test 'session reused' > > # at ssl.t line 187. 
> > # 'HTTP/1.1 200 OK > > # Server: nginx/1.24.0 > > # Date: Thu, 25 Jan 2024 18:50:10 GMT > > # Content-Type: text/plain > > # Content-Length: 6 > > # Connection: close > > # > > # body .' > > # doesn't match '(?^m:^body r$)' > > ``` > > [...] > > It looks like SSL session reuse is broken in Perl you are > using. This might be the case if, for example, Net::SSLeay in > your installation was compiled with system LibreSSL as an SSL > library - at least on the server side LibreSSL simply does not > support session reuse with TLSv1.3. > > Test suite checks if nginx was compiled with LibreSSL and marks > appropriate tests as TODO, but if the Perl module is broken > instead, the test will fail. > Well, technically, we could test this and skip appropriately: diff --git a/ssl_session_reuse.t b/ssl_session_reuse.t --- a/ssl_session_reuse.t +++ b/ssl_session_reuse.t @@ -166,7 +166,9 @@ local $TODO = 'no TLSv1.3 sessions, old local $TODO = 'no TLSv1.3 sessions, old IO::Socket::SSL' if $IO::Socket::SSL::VERSION < 2.061 && test_tls13(); local $TODO = 'no TLSv1.3 sessions in LibreSSL' - if $t->has_module('LibreSSL') && test_tls13(); + if ($t->has_module('LibreSSL') + || Net::SSLeay::constant("LIBRESSL_VERSION_NUMBER")) + && test_tls13(); is(test_reuse(8443), 1, 'tickets reused'); is(test_reuse(8444), 1, 'tickets and cache reused'); But I see little to no purpose: if the testing tool is broken in various unexpected ways (another example is X509_V_ERR_INVALID_PURPOSE in peer certificate verification as reported in the adjacent thread), I think we barely can handle this in general. From garga at FreeBSD.org Fri Jan 26 19:28:15 2024 From: garga at FreeBSD.org (Renato Botelho) Date: Fri, 26 Jan 2024 16:28:15 -0300 Subject: nginx-tests: Some SSL tests are failing with openssl 3.2.0 Message-ID: <68c9e6c9-70b6-4ac0-b9ad-4b04ece6e311@FreeBSD.org> Hello! I'm building nginx on an environment with openssl 3.2.0 and some SSL tests are failing. I suspect it's related to openssl version because it works on another env with older openssl. But maybe I'm wrong. Here are results ./access.t ................................. ok ./access_log.t ............................. ok ./access_log_variables.t ................... ok ./addition.t ............................... ok ./addition_buffered.t ...................... ok ./auth_basic.t ............................. ok ./auth_delay.t ............................. ok ./auth_request.t ........................... ok ./auth_request_satisfy.t ................... ok ./auth_request_set.t ....................... ok ./autoindex.t .............................. ok ./autoindex_format.t ....................... ok ./autoindex_win32.t ........................ skipped: Win32API::File not installed ./binary_upgrade.t ......................... skipped: can leave orphaned process group ./body.t ................................... ok ./body_chunked.t ........................... ok ./charset.t ................................ ok ./charset_gzip_static.t .................... ok ./config_dump.t ............................ ok ./dav.t .................................... ok ./dav_chunked.t ............................ ok ./dav_utf8.t ............................... ok ./debug_connection.t ....................... skipped: no --with-debug available ./debug_connection_syslog.t ................ skipped: no --with-debug available ./debug_connection_unix.t .................. skipped: no --with-debug available ./empty_gif.t .............................. ok ./error_log.t .............................. 
ok ./fastcgi.t ................................ skipped: FCGI not installed ./fastcgi_body.t ........................... ok ./fastcgi_body2.t .......................... skipped: FCGI not installed ./fastcgi_buffering.t ...................... skipped: FCGI not installed ./fastcgi_cache.t .......................... skipped: FCGI not installed ./fastcgi_extra_data.t ..................... skipped: FCGI not installed ./fastcgi_header_params.t .................. skipped: FCGI not installed ./fastcgi_keepalive.t ...................... ok ./fastcgi_merge_params.t ................... skipped: FCGI not installed ./fastcgi_merge_params2.t .................. skipped: FCGI not installed ./fastcgi_request_buffering.t .............. skipped: FCGI not installed ./fastcgi_request_buffering_chunked.t ...... skipped: FCGI not installed ./fastcgi_split.t .......................... skipped: FCGI not installed ./fastcgi_unix.t ........................... skipped: FCGI not installed ./fastcgi_variables.t ...................... skipped: FCGI not installed ./geo.t .................................... ok ./geo_binary.t ............................. skipped: long configuration parsing ./geo_ipv6.t ............................... ok ./geo_unix.t ............................... ok ./geoip.t .................................. skipped: no http_geoip available ./grpc.t ................................... ok ./grpc_next_upstream.t ..................... ok ./grpc_pass.t .............................. ok ./grpc_request_buffering.t ................. ok ./grpc_ssl.t ............................... ok ./gunzip.t ................................. ok ./gunzip_memcached.t ....................... skipped: Cache::Memcached not installed ./gunzip_perl.t ............................ skipped: no perl available ./gunzip_ssi.t ............................. ok ./gunzip_static.t .......................... ok ./gzip.t ................................... ok ./gzip_flush.t ............................. skipped: no perl available ./h2.t ..................................... ok ./h2_absolute_redirect.t ................... ok ./h2_auth_request.t ........................ ok ./h2_error_page.t .......................... ok ./h2_fastcgi_request_buffering.t ........... ok ./h2_headers.t ............................. ok ./h2_http2.t ............................... skipped: no http2 ./h2_keepalive.t ........................... ok ./h2_limit_conn.t .......................... ok ./h2_limit_req.t ........................... ok ./h2_priority.t ............................ ok ./h2_proxy_cache.t ......................... ok ./h2_proxy_max_temp_file_size.t ............ ok ./h2_proxy_protocol.t ...................... ok ./h2_proxy_request_buffering.t ............. ok ./h2_proxy_request_buffering_redirect.t .... ok ./h2_proxy_request_buffering_ssl.t ......... ok ./h2_proxy_ssl.t ........................... ok ./h2_request_body.t ........................ ok ./h2_request_body_extra.t .................. ok ./h2_request_body_preread.t ................ ok ./h2_server_tokens.t ....................... ok ./h2_ssl.t ................................. ok ./h2_ssl_proxy_cache.t ..................... ok ./h2_ssl_proxy_protocol.t .................. ok ./h2_ssl_variables.t ....................... ok ./h2_ssl_verify_client.t ................... ok ./h2_trailers.t ............................ ok ./h2_variables.t ........................... ok ./h3_absolute_redirect.t ................... skipped: no http_v3 available ./h3_headers.t ............................. 
skipped: no http_v3 available ./h3_keepalive.t ........................... skipped: no http_v3 available ./h3_limit_conn.t .......................... skipped: no http_v3 available ./h3_limit_req.t ........................... skipped: no http_v3 available ./h3_proxy.t ............................... skipped: no http_v3 available ./h3_proxy_max_temp_file_size.t ............ skipped: no http_v3 available ./h3_reusable.t ............................ skipped: no http_v3 available ./h3_server_name.t ......................... skipped: no http_v3 available ./h3_server_tokens.t ....................... skipped: no http_v3 available ./h3_ssl_early_data.t ...................... skipped: no http_v3 available ./h3_ssl_reject_handshake.t ................ skipped: no http_v3 available ./h3_ssl_session_reuse.t ................... skipped: no http_v3 available ./h3_trailers.t ............................ skipped: no http_v3 available ./headers.t ................................ ok ./http_absolute_redirect.t ................. ok ./http_disable_symlinks.t .................. skipped: no external file found ./http_error_page.t ........................ ok ./http_expect_100_continue.t ............... ok ./http_header_buffers.t .................... ok ./http_headers_multi.t ..................... ok ./http_host.t .............................. ok ./http_include.t ........................... ok ./http_keepalive.t ......................... ok ./http_keepalive_shutdown.t ................ ok ./http_listen.t ............................ ok ./http_listen_wildcard.t ................... skipped: listen on wildcard address ./http_location.t .......................... ok ./http_location_auto.t ..................... ok ./http_location_win32.t .................... skipped: not win32 ./http_method.t ............................ ok ./http_resolver.t .......................... ok ./http_resolver_aaaa.t ..................... ok ./http_resolver_cleanup.t .................. ok ./http_resolver_cname.t .................... ok ./http_resolver_ipv4.t ..................... ok ./http_server_name.t ....................... ok ./http_try_files.t ......................... ok ./http_uri.t ............................... ok ./http_variables.t ......................... ok ./ignore_invalid_headers.t ................. ok ./image_filter.t ........................... skipped: GD not installed ./image_filter_finalize.t .................. skipped: no image_filter available ./image_filter_webp.t ...................... skipped: no image_filter available ./index.t .................................. ok ./limit_conn.t ............................. ok ./limit_conn_complex.t ..................... ok ./limit_conn_dry_run.t ..................... ok ./limit_rate.t ............................. ok ./limit_req.t .............................. ok ./limit_req2.t ............................. ok ./limit_req_delay.t ........................ ok ./limit_req_dry_run.t ...................... ok ./mail_capability.t ........................ skipped: no imap available ./mail_error_log.t ......................... skipped: no imap available ./mail_imap.t .............................. skipped: no imap available ./mail_imap_ssl.t .......................... skipped: no imap available ./mail_max_errors.t ........................ skipped: no imap available ./mail_pop3.t .............................. skipped: no pop3 available ./mail_proxy_protocol.t .................... skipped: no smtp available ./mail_proxy_smtp_auth.t ................... 
skipped: no smtp available ./mail_resolver.t .......................... skipped: no smtp available ./mail_smtp.t .............................. skipped: no smtp available ./mail_smtp_greeting_delay.t ............... skipped: no smtp available ./mail_smtp_xclient.t ...................... skipped: no smtp available ./mail_ssl.t ............................... skipped: no imap available ./mail_ssl_conf_command.t .................. skipped: no imap available ./mail_ssl_session_reuse.t ................. skipped: no imap available ./map.t .................................... ok ./map_complex.t ............................ ok ./map_volatile.t ........................... ok ./memcached.t .............................. skipped: Cache::Memcached not installed ./memcached_fake.t ......................... ok ./memcached_fake_extra.t ................... ok ./memcached_keepalive.t .................... skipped: Cache::Memcached not installed ./memcached_keepalive_stale.t .............. skipped: Cache::Memcached not installed ./merge_slashes.t .......................... ok ./mirror.t ................................. ok ./mirror_proxy.t ........................... ok ./mp4.t .................................... ok ./mp4_ssi.t ................................ ok ./mp4_start_key_frame.t .................... ok ./msie_refresh.t ........................... ok ./not_modified.t ........................... ok ./not_modified_finalize.t .................. ok ./not_modified_proxy.t ..................... ok ./perl.t ................................... skipped: no perl available ./perl_gzip.t .............................. skipped: no perl available ./perl_sleep.t ............................. skipped: no perl available ./perl_ssi.t ............................... skipped: no perl available ./post_action.t ............................ ok ./proxy.t .................................. ok ./proxy_available.t ........................ ok ./proxy_bind.t ............................. ok ./proxy_bind_transparent.t ................. skipped: must be root ./proxy_bind_transparent_capability.t ...... skipped: must be root ./proxy_cache.t ............................ ok ./proxy_cache_bypass.t ..................... ok ./proxy_cache_chunked.t .................... ok ./proxy_cache_control.t .................... ok ./proxy_cache_convert_head.t ............... ok ./proxy_cache_error.t ...................... ok ./proxy_cache_lock.t ....................... ok ./proxy_cache_lock_age.t ................... ok ./proxy_cache_lock_ssi.t ................... ok ./proxy_cache_manager.t .................... skipped: long test ./proxy_cache_max_range_offset.t ........... ok ./proxy_cache_min_free.t ................... ok ./proxy_cache_path.t ....................... ok ./proxy_cache_range.t ...................... ok ./proxy_cache_revalidate.t ................. ok ./proxy_cache_use_stale.t .................. ok ./proxy_cache_valid.t ...................... ok ./proxy_cache_variables.t .................. ok ./proxy_cache_vary.t ....................... ok ./proxy_chunked.t .......................... ok ./proxy_chunked_extra.t .................... ok ./proxy_cookie.t ........................... ok ./proxy_cookie_flags.t ..................... ok ./proxy_duplicate_headers.t ................ ok ./proxy_extra_data.t ....................... ok ./proxy_force_ranges.t ..................... ok ./proxy_if.t ............................... ok ./proxy_implicit.t ......................... ok ./proxy_intercept_errors.t ................. 
ok ./proxy_keepalive.t ........................ ok ./proxy_limit_rate.t ....................... ok ./proxy_max_temp_file_size.t ............... ok ./proxy_merge_headers.t .................... ok ./proxy_method.t ........................... ok ./proxy_next_upstream.t .................... ok ./proxy_next_upstream_tries.t .............. ok ./proxy_noclose.t .......................... ok ./proxy_non_idempotent.t ................... ok ./proxy_pass_request.t ..................... ok ./proxy_protocol.t ......................... ok ./proxy_protocol2.t ........................ ok ./proxy_protocol2_tlv.t .................... ok ./proxy_protocol_ipv6.t .................... ok ./proxy_protocol_unix.t .................... ok ./proxy_redirect.t ......................... ok ./proxy_request_buffering.t ................ ok ./proxy_request_buffering_chunked.t ........ ok ./proxy_request_buffering_keepalive.t ...... ok ./proxy_request_buffering_ssl.t ............ ok ./proxy_set_body.t ......................... ok ./proxy_ssi_body.t ......................... ok ./proxy_ssl.t .............................. ok ./proxy_ssl_certificate.t .................. ok ./proxy_ssl_certificate_empty.t ............ ok ./proxy_ssl_certificate_vars.t ............. ok ./proxy_ssl_conf_command.t ................. ok ./proxy_ssl_keepalive.t .................... ok ./proxy_ssl_name.t ......................... ok ./proxy_ssl_verify.t ....................... ok ./proxy_store.t ............................ ok ./proxy_unfinished.t ....................... ok ./proxy_unix.t ............................. ok ./proxy_upgrade.t .......................... ok ./proxy_upstream_cookie.t .................. ok ./proxy_variables.t ........................ ok ./proxy_websocket.t ........................ skipped: Protocol::WebSocket not installed ./proxy_xar.t .............................. ok ./quic_ciphers.t ........................... skipped: no http_v3 available ./quic_key_update.t ........................ skipped: no http_v3 available ./quic_migration.t ......................... skipped: no http_v3 available ./quic_retry.t ............................. skipped: no http_v3 available ./random_index.t ........................... ok ./range.t .................................. ok ./range_charset.t .......................... ok ./range_clearing.t ......................... ok ./range_flv.t .............................. ok ./range_if_range.t ......................... ok ./range_mp4.t .............................. ok ./realip.t ................................. ok ./realip_hostname.t ........................ ok ./realip_remote_addr.t ..................... ok ./realip_remote_port.t ..................... ok ./referer.t ................................ ok ./request_id.t ............................. ok ./rewrite.t ................................ ok ./rewrite_if.t ............................. ok ./rewrite_set.t ............................ ok ./rewrite_unescape.t ....................... ok ./scgi.t ................................... skipped: SCGI not installed ./scgi_body.t .............................. skipped: SCGI not installed ./scgi_cache.t ............................. skipped: SCGI not installed ./scgi_extra_data.t ........................ skipped: SCGI not installed ./scgi_gzip.t .............................. skipped: SCGI not installed ./scgi_merge_params.t ...................... skipped: SCGI not installed ./secure_link.t ............................ ok ./server_tokens.t .......................... 
ok ./slice.t .................................. ok ./split_clients.t .......................... ok ./ssi.t .................................... ok ./ssi_delayed.t ............................ ok ./ssi_if.t ................................. ok ./ssi_include_big.t ........................ ok ./ssi_waited.t ............................. ok ./ssl.t .................................... ok ./ssl_certificate.t ........................ ok # Failed test 'intermediate' # at ./ssl_certificate_chain.t line 137. # Failed test 'intermediate server' # at ./ssl_certificate_chain.t line 138. # Looks like you failed 2 tests of 5. ./ssl_certificate_chain.t .................. Dubious, test returned 2 (wstat 512, 0x200) Failed 2/5 subtests ./ssl_certificate_perl.t ................... skipped: no perl available ./ssl_certificates.t ....................... ok ./ssl_client_escaped_cert.t ................ ok ./ssl_conf_command.t ....................... ok # Failed test 'crl - no revoked certs' # at ./ssl_crl.t line 157. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:46 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: FAILED:unsuitable certificate purpose # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^:SUCCESS)' # Looks like you failed 1 test of 5. ./ssl_crl.t ................................ Dubious, test returned 1 (wstat 256, 0x100) Failed 1/5 subtests ./ssl_curve.t .............................. ok ./ssl_engine_keys.t ........................ skipped: may not work, leaves coredump # Failed test 'ocsp leaf' # at ./ssl_ocsp.t line 273. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'ocsp many failed request' # at ./ssl_ocsp.t line 277. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:400 Bad.*FAILED:certificate status request failed)' # Failed test 'ocsp many failed' # at ./ssl_ocsp.t line 283. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:400 Bad.*FAILED:certificate status request failed)' # Failed test 'ocsp many' # at ./ssl_ocsp.t line 299. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'cache store' # at ./ssl_ocsp.t line 303. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'revoked' # at ./ssl_ocsp.t line 322. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:400 Bad.*FAILED:certificate revoked)' # Failed test 'ocsp responder' # at ./ssl_ocsp.t line 326. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'ocsp context' # at ./ssl_ocsp.t line 330. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'cache lookup' # at ./ssl_ocsp.t line 334. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'root ca not trusted' # at ./ssl_ocsp.t line 338. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:400 Bad.*FAILED:certificate status request failed)' # Failed test 'ocsp ecdsa' # at ./ssl_ocsp.t line 350. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS)' # Failed test 'session reused' # at ./ssl_ocsp.t line 362. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:200 OK.*SUCCESS:r)' # Failed test 'session reused - revoked' # at ./ssl_ocsp.t line 394. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:36:47 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: xFAILED:unsuitable certificate purpose:.x # X-SSL-Protocol: TLSv1.3 # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^s:400 Bad.*FAILED:certificate revoked:r)' # Looks like you failed 13 tests of 17. ./ssl_ocsp.t ............................... Dubious, test returned 13 (wstat 3328, 0xd00) Failed 13/17 subtests ./ssl_password_file.t ...................... ok ./ssl_proxy_protocol.t ..................... ok ./ssl_proxy_upgrade.t ...................... ok ./ssl_reject_handshake.t ................... ok ./ssl_session_reuse.t ...................... ok ./ssl_session_ticket_key.t ................. ok ./ssl_sni.t ................................ ok ./ssl_sni_reneg.t .......................... ok ./ssl_sni_sessions.t ....................... ok ./ssl_stapling.t ........................... ok ./ssl_verify_client.t ...................... ok # Failed test 'verify depth 1 - int' # at ./ssl_verify_depth.t line 161. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:37:01 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Client: CN=int # X-Verify: FAILED:unsuitable certificate purpose # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^:SUCCESS)' # Failed test 'verify depth 2 - int' # at ./ssl_verify_depth.t line 168. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:37:01 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Client: CN=int # X-Verify: FAILED:unsuitable certificate purpose # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^:SUCCESS)' # Failed test 'verify depth 2 - end' # at ./ssl_verify_depth.t line 169. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.24.0 # Date: Fri, 26 Jan 2024 11:37:01 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Client: CN=end # X-Verify: FAILED:unsuitable certificate purpose # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.24.0</center>
# # # ' # doesn't match '(?^:SUCCESS)' # Looks like you failed 3 tests of 11. ./ssl_verify_depth.t ....................... Dubious, test returned 3 (wstat 768, 0x300) Failed 3/11 subtests ./stream_access.t .......................... ok ./stream_access_log.t ...................... ok ./stream_access_log_escape.t ............... ok ./stream_access_log_none.t ................. ok ./stream_error_log.t ....................... ok ./stream_geo.t ............................. ok ./stream_geo_binary.t ...................... skipped: long configuration parsing ./stream_geo_ipv6.t ........................ ok ./stream_geo_unix.t ........................ ok ./stream_geoip.t ........................... skipped: no stream_geoip available ./stream_limit_conn.t ...................... ok ./stream_limit_conn_complex.t .............. ok ./stream_limit_conn_dry_run.t .............. ok ./stream_limit_rate.t ...................... ok ./stream_limit_rate2.t ..................... ok ./stream_map.t ............................. ok ./stream_proxy.t ........................... ok ./stream_proxy_bind.t ...................... ok ./stream_proxy_complex.t ................... ok ./stream_proxy_half_close.t ................ ok ./stream_proxy_next_upstream.t ............. ok ./stream_proxy_protocol.t .................. ok ./stream_proxy_protocol2_tlv.t ............. ok ./stream_proxy_protocol_ipv6.t ............. ok ./stream_proxy_protocol_ssl.t .............. ok ./stream_proxy_ssl.t ....................... ok ./stream_proxy_ssl_certificate.t ........... ok ./stream_proxy_ssl_certificate_vars.t ...... ok ./stream_proxy_ssl_conf_command.t .......... ok ./stream_proxy_ssl_name.t .................. ok ./stream_proxy_ssl_name_complex.t .......... ok ./stream_proxy_ssl_verify.t ................ ok ./stream_realip.t .......................... ok ./stream_realip_hostname.t ................. ok ./stream_resolver.t ........................ ok ./stream_set.t ............................. ok ./stream_split_clients.t ................... ok ./stream_ssl.t ............................. ok ./stream_ssl_alpn.t ........................ ok ./stream_ssl_certificate.t ................. ok ./stream_ssl_conf_command.t ................ ok ./stream_ssl_preread.t ..................... ok ./stream_ssl_preread_alpn.t ................ ok ./stream_ssl_preread_protocol.t ............ ok ./stream_ssl_realip.t ...................... ok ./stream_ssl_session_reuse.t ............... ok ./stream_ssl_variables.t ................... ok ./stream_ssl_verify_client.t ............... ok ./stream_status_variable.t ................. ok ./stream_tcp_nodelay.t ..................... ok ./stream_udp_limit_conn.t .................. ok ./stream_udp_limit_rate.t .................. ok ./stream_udp_proxy.t ....................... ok ./stream_udp_proxy_requests.t .............. ok ./stream_udp_stream.t ...................... ok ./stream_udp_upstream.t .................... ok ./stream_udp_upstream_hash.t ............... ok ./stream_udp_upstream_least_conn.t ......... ok ./stream_udp_wildcard.t .................... skipped: listen on wildcard address ./stream_unix.t ............................ ok ./stream_upstream.t ........................ ok ./stream_upstream_hash.t ................... ok ./stream_upstream_least_conn.t ............. ok ./stream_upstream_max_conns.t .............. ok ./stream_upstream_random.t ................. ok ./stream_upstream_zone.t ................... ok ./stream_upstream_zone_ssl.t ............... ok ./stream_variables.t ....................... 
ok ./stub_status.t ............................ ok ./sub_filter.t ............................. ok ./sub_filter_buffering.t ................... ok ./sub_filter_merge.t ....................... ok ./sub_filter_multi.t ....................... ok ./sub_filter_multi2.t ...................... ok ./sub_filter_perl.t ........................ skipped: no perl available ./sub_filter_slice.t ....................... ok ./sub_filter_ssi.t ......................... skipped: no xslt available ./subrequest_output_buffer_size.t .......... ok ./syslog.t ................................. ok ./trailers.t ............................... ok ./upstream.t ............................... ok ./upstream_hash.t .......................... ok ./upstream_hash_memcached.t ................ skipped: Cache::Memcached not installed ./upstream_ip_hash.t ....................... ok ./upstream_ip_hash_ipv6.t .................. ok ./upstream_keepalive.t ..................... ok ./upstream_least_conn.t .................... ok ./upstream_max_conns.t ..................... ok ./upstream_random.t ........................ ok ./upstream_zone.t .......................... ok ./upstream_zone_ssl.t ...................... ok ./userid.t ................................. ok ./userid_flags.t ........................... ok ./uwsgi.t .................................. skipped: uwsgi not found ./uwsgi_body.t ............................. skipped: uwsgi not found ./uwsgi_ssl.t .............................. skipped: uwsgi not found ./uwsgi_ssl_certificate.t .................. ok ./uwsgi_ssl_certificate_vars.t ............. ok ./uwsgi_ssl_verify.t ....................... skipped: uwsgi not found ./worker_shutdown_timeout.t ................ ok ./worker_shutdown_timeout_h2.t ............. ok ./worker_shutdown_timeout_mail.t ........... skipped: no imap available ./worker_shutdown_timeout_proxy_upgrade.t .. ok ./worker_shutdown_timeout_stream.t ......... ok ./xslt.t ................................... skipped: no xslt available ./xslt_params.t ............................ skipped: no xslt available Test Summary Report ------------------- ./ssl_certificate_chain.t (Wstat: 512 (exited 2) Tests: 5 Failed: 2) Failed tests: 2-3 Non-zero exit status: 2 ./ssl_crl.t (Wstat: 256 (exited 1) Tests: 5 Failed: 1) Failed test: 1 Non-zero exit status: 1 ./ssl_ocsp.t (Wstat: 3328 (exited 13) Tests: 17 Failed: 13) Failed tests: 1-13 Non-zero exit status: 13 ./ssl_verify_depth.t (Wstat: 768 (exited 3) Tests: 11 Failed: 3) Failed tests: 5, 8-9 Non-zero exit status: 3 Files=416, Tests=4620, 423 wallclock secs ( 2.27 usr 0.88 sys + 78.05 cusr 14.15 csys = 95.35 CPU) Result: FAIL -- Renato Botelho From jordanc.carter at outlook.com Fri Jan 26 20:36:19 2024 From: jordanc.carter at outlook.com (J Carter) Date: Fri, 26 Jan 2024 20:36:19 +0000 Subject: nginx-tests: Some SSL tests are failing with openssl 3.2.0 In-Reply-To: <68c9e6c9-70b6-4ac0-b9ad-4b04ece6e311@FreeBSD.org> References: <68c9e6c9-70b6-4ac0-b9ad-4b04ece6e311@FreeBSD.org> Message-ID: Hello, On Fri, 26 Jan 2024 16:28:15 -0300 Renato Botelho wrote: > Hello! > > I'm building nginx on an environment with openssl 3.2.0 and some SSL > tests are failing. I suspect it's related to openssl version because it > works on another env with older openssl. But maybe I'm wrong. > > Here are results [..] I was able to reproduce exact same result on Arch Linux (also Openssl 3.2.0, so looks likely). ./ssl_certificate_chain.t .................. 
Dubious, test returned 2 (wstat 512, 0x200) Failed 2/5 subtests ./ssl_certificate.t ........................ ok ./ssl_conf_command.t ....................... ok ./ssl_certificates.t ....................... ok ./ssl_engine_keys.t ........................ skipped: may not work, leaves coredump ./ssl_client_escaped_cert.t ................ ok ./ssl_proxy_protocol.t ..................... skipped: no realip available ===( 2096;27 1/3 4/23 1/5 1/3 0/? 0/5 0/32 0/9 )=========== # Failed test 'crl - no revoked certs' # at ./ssl_crl.t line 157. # 'HTTP/1.1 400 Bad Request # Server: nginx/1.25.4 # Date: Fri, 26 Jan 2024 20:26:41 GMT # Content-Type: text/html # Content-Length: 215 # Connection: close # X-Verify: FAILED:unsuitable certificate purpose # # # 400 The SSL certificate error # #

# <center><h1>400 Bad Request</h1></center>
# <center>The SSL certificate error</center>
# <hr><center>nginx/1.25.4</center>
# # # ' # doesn't match '(?^:SUCCESS)' ./ssl_curve.t .............................. ok ./ssi_delayed.t ............................ ok ===( 2105;27 4/23 3/5 0/? 1/5 2/32 0/9 0/? 0/? )===========# Looks like you failed 1 test of 5. ./ssl_crl.t ................................ Dubious, test returned 1 (wstat 256, 0x100) ...and so on.... Test Summary Report ------------------- ./ssl_certificate_chain.t (Wstat: 512 (exited 2) Tests: 5 Failed: 2) Failed tests: 2-3 Non-zero exit status: 2 ./ssl_crl.t (Wstat: 256 (exited 1) Tests: 5 Failed: 1) Failed test: 1 Non-zero exit status: 1 ./ssl_ocsp.t (Wstat: 3328 (exited 13) Tests: 17 Failed: 13) Failed tests: 1-13 Non-zero exit status: 13 ./ssl_verify_depth.t (Wstat: 768 (exited 3) Tests: 11 Failed: 3) Failed tests: 5, 8-9 Non-zero exit status: 3 Files=416, Tests=2505, 35 wallclock secs ( 1.43 usr 0.71 sys + 41.49 cusr 8.42 csys = 52.05 CPU) Result: FAIL [vagrant at archlinux nginx-tests]$ openssl version OpenSSL 3.2.0 23 Nov 2023 (Library: OpenSSL 3.2.0 23 Nov 2023) [vagrant at archlinux nginx-tests]$ uname -a Linux archlinux 6.7.0-arch3-1 #1 SMP PREEMPT_DYNAMIC Sat, 13 Jan 2024 14:37:14 +0000 x86_64 GNU/Linux [vagrant at archlinux nginx-tests]$ ../nginx/objs/nginx -V nginx version: nginx/1.25.4 built by gcc 13.2.1 20230801 (GCC) built with OpenSSL 3.2.0 23 Nov 2023 TLS SNI support enabled configure arguments: --with-http_ssl_module From mdounin at mdounin.ru Sat Jan 27 04:19:44 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 27 Jan 2024 07:19:44 +0300 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> References: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> Message-ID: Hello! On Fri, Jan 26, 2024 at 09:29:58PM +0400, Sergey Kandaurov wrote: > On Thu, Jan 25, 2024 at 11:38:57PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote: > > > > > Hi all, > > > > > > I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? > > > > > > OS: MacOS 12.6.3 > > > Chip: Apple M1 Max > > > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module > > > Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > > > OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > > > Perl: 5.30.3 (/usr/bin/perl) > > > > > > When I run > > > > > > ``` > > > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > > > ``` > > > > > > I see > > > > > > ``` > > > not ok 2 - session reused > > > > > > # Failed test 'session reused' > > > # at ssl.t line 187. > > > # 'HTTP/1.1 200 OK > > > # Server: nginx/1.24.0 > > > # Date: Thu, 25 Jan 2024 18:50:10 GMT > > > # Content-Type: text/plain > > > # Content-Length: 6 > > > # Connection: close > > > # > > > # body .' > > > # doesn't match '(?^m:^body r$)' > > > ``` > > > > [...] > > > > It looks like SSL session reuse is broken in Perl you are > > using. This might be the case if, for example, Net::SSLeay in > > your installation was compiled with system LibreSSL as an SSL > > library - at least on the server side LibreSSL simply does not > > support session reuse with TLSv1.3. 
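[Editorial note with a code sketch: whether a given build uses OpenSSL or LibreSSL can be told apart at compile time, since only LibreSSL's headers define LIBRESSL_VERSION_NUMBER; this is a minimal standalone illustration, not part of nginx or the test suite, and the output format is made up. OpenSSL_version() is assumed to be available, as it is in OpenSSL 1.1.0+ and reasonably recent LibreSSL.]

/*
 * Minimal sketch: distinguishing OpenSSL from LibreSSL, both for the
 * headers used at build time and for the library linked at run time.
 * Build with: cc check.c -lcrypto
 */

#include <stdio.h>

#include <openssl/opensslv.h>
#include <openssl/crypto.h>

int
main(void)
{
#ifdef LIBRESSL_VERSION_NUMBER
    printf("headers: LibreSSL 0x%08lx\n",
           (unsigned long) LIBRESSL_VERSION_NUMBER);
#else
    printf("headers: OpenSSL 0x%08lx\n",
           (unsigned long) OPENSSL_VERSION_NUMBER);
#endif

    /* the library actually linked may differ from the build headers,
     * which is exactly the Net::SSLeay situation described above */
    printf("library: %s\n", OpenSSL_version(OPENSSL_VERSION));

    return 0;
}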
> > > > Test suite checks if nginx was compiled with LibreSSL and marks > > appropriate tests as TODO, but if the Perl module is broken > > instead, the test will fail. > > > > Well, technically, we could test this and skip appropriately: > > diff --git a/ssl_session_reuse.t b/ssl_session_reuse.t > --- a/ssl_session_reuse.t > +++ b/ssl_session_reuse.t > @@ -166,7 +166,9 @@ local $TODO = 'no TLSv1.3 sessions, old > local $TODO = 'no TLSv1.3 sessions, old IO::Socket::SSL' > if $IO::Socket::SSL::VERSION < 2.061 && test_tls13(); > local $TODO = 'no TLSv1.3 sessions in LibreSSL' > - if $t->has_module('LibreSSL') && test_tls13(); > + if ($t->has_module('LibreSSL') > + || Net::SSLeay::constant("LIBRESSL_VERSION_NUMBER")) > + && test_tls13(); > > is(test_reuse(8443), 1, 'tickets reused'); > is(test_reuse(8444), 1, 'tickets and cache reused'); > > But I see little to no purpose: if the testing tool is broken > in various unexpected ways (another example is X509_V_ERR_INVALID_PURPOSE > in peer certificate verification as reported in the adjacent thread), > I think we barely can handle this in general. I generally agree. Still, the X509_V_ERR_INVALID_PURPOSE seems to be an OpenSSL 3.2.0-related issue: for tests using CA root certificates without CA:TRUE it now generates X509_V_ERR_INVALID_CA on the root certificate, which then changed to X509_V_ERR_INVALID_PURPOSE. Given the list of incompatible changes from NEWS.md, and the fact that the same tests work fine with OpenSSL 3.2.0 but with "openssl" binary from older versions, it seems to be this: * The `x509`, `ca`, and `req` apps now always produce X.509v3 certificates. This needs to be addressed. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Jan 28 18:56:53 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Jan 2024 21:56:53 +0300 Subject: nginx-tests: Some SSL tests are failing with openssl 3.2.0 In-Reply-To: <68c9e6c9-70b6-4ac0-b9ad-4b04ece6e311@FreeBSD.org> References: <68c9e6c9-70b6-4ac0-b9ad-4b04ece6e311@FreeBSD.org> Message-ID: Hello! On Fri, Jan 26, 2024 at 04:28:15PM -0300, Renato Botelho wrote: > I'm building nginx on an environment with openssl 3.2.0 and some SSL tests > are failing. I suspect it's related to openssl version because it works on > another env with older openssl. But maybe I'm wrong. That's a result of incompatible change introduced in the "openssl" application by OpenSSL 3.2.0, it now generates X509v3 certs even if not asked to, just discussed here: https://mailman.nginx.org/pipermail/nginx-devel/2024-January/32O7PUI3XJZZGBMLS2NAH654MS23MVDD.html Will be eventually fixed. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Jan 29 03:24:53 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 29 Jan 2024 06:24:53 +0300 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> Message-ID: Hello! On Sat, Jan 27, 2024 at 07:19:45AM +0300, Maxim Dounin wrote: > Hello! > > On Fri, Jan 26, 2024 at 09:29:58PM +0400, Sergey Kandaurov wrote: > > > On Thu, Jan 25, 2024 at 11:38:57PM +0300, Maxim Dounin wrote: > > > Hello! > > > > > > On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote: > > > > > > > Hi all, > > > > > > > > I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. 
Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? > > > > > > > > OS: MacOS 12.6.3 > > > > Chip: Apple M1 Max > > > > NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module > > > > Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > > > > OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > > > > Perl: 5.30.3 (/usr/bin/perl) > > > > > > > > When I run > > > > > > > > ``` > > > > TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > > > > ``` > > > > > > > > I see > > > > > > > > ``` > > > > not ok 2 - session reused > > > > > > > > # Failed test 'session reused' > > > > # at ssl.t line 187. > > > > # 'HTTP/1.1 200 OK > > > > # Server: nginx/1.24.0 > > > > # Date: Thu, 25 Jan 2024 18:50:10 GMT > > > > # Content-Type: text/plain > > > > # Content-Length: 6 > > > > # Connection: close > > > > # > > > > # body .' > > > > # doesn't match '(?^m:^body r$)' > > > > ``` > > > > > > [...] > > > > > > It looks like SSL session reuse is broken in Perl you are > > > using. This might be the case if, for example, Net::SSLeay in > > > your installation was compiled with system LibreSSL as an SSL > > > library - at least on the server side LibreSSL simply does not > > > support session reuse with TLSv1.3. > > > > > > Test suite checks if nginx was compiled with LibreSSL and marks > > > appropriate tests as TODO, but if the Perl module is broken > > > instead, the test will fail. > > > > > > > Well, technically, we could test this and skip appropriately: > > > > diff --git a/ssl_session_reuse.t b/ssl_session_reuse.t > > --- a/ssl_session_reuse.t > > +++ b/ssl_session_reuse.t > > @@ -166,7 +166,9 @@ local $TODO = 'no TLSv1.3 sessions, old > > local $TODO = 'no TLSv1.3 sessions, old IO::Socket::SSL' > > if $IO::Socket::SSL::VERSION < 2.061 && test_tls13(); > > local $TODO = 'no TLSv1.3 sessions in LibreSSL' > > - if $t->has_module('LibreSSL') && test_tls13(); > > + if ($t->has_module('LibreSSL') > > + || Net::SSLeay::constant("LIBRESSL_VERSION_NUMBER")) > > + && test_tls13(); > > > > is(test_reuse(8443), 1, 'tickets reused'); > > is(test_reuse(8444), 1, 'tickets and cache reused'); > > > > But I see little to no purpose: if the testing tool is broken > > in various unexpected ways (another example is X509_V_ERR_INVALID_PURPOSE > > in peer certificate verification as reported in the adjacent thread), > > I think we barely can handle this in general. > > I generally agree. > > Still, the X509_V_ERR_INVALID_PURPOSE seems to be an OpenSSL > 3.2.0-related issue: for tests using CA root certificates without > CA:TRUE it now generates X509_V_ERR_INVALID_CA on the root > certificate, which then changed to X509_V_ERR_INVALID_PURPOSE. > > Given the list of incompatible changes from NEWS.md, and the fact > that the same tests work fine with OpenSSL 3.2.0 but with > "openssl" binary from older versions, it seems to be this: > > * The `x509`, `ca`, and `req` apps now always produce X.509v3 certificates. > > This needs to be addressed. Patch: # HG changeset patch # User Maxim Dounin # Date 1706477656 -10800 # Mon Jan 29 00:34:16 2024 +0300 # Node ID 156665421f83a054cf331e8f9a27dd4d2f86114d # Parent 27a79d3a8658794d7c0f8c246bcd92a9861da468 Tests: compatibility with "openssl" app from OpenSSL 3.2.0. 
OpenSSL 3.2.0's "openssl" app generates X.509v3 certificates unless explicitly asked not to. Such certificates, even self-signed ones, cannot be used to sign other certificates without CA:TRUE explicitly set in the basicConstraints extension. As a result, tests doing so are now failing. Fix is to provide basicConstraints with CA:TRUE for self-signed root certificates used in "openssl ca" calls. diff -r 27a79d3a8658 -r 156665421f83 ssl.t --- a/ssl.t Sun Jan 28 23:12:26 2024 +0300 +++ b/ssl.t Mon Jan 29 00:34:16 2024 +0300 @@ -119,7 +119,10 @@ EOF default_bits = 2048 encrypt_key = no distinguished_name = req_distinguished_name +x509_extensions = myca_extensions [ req_distinguished_name ] +[ myca_extensions ] +basicConstraints = critical,CA:TRUE EOF my $d = $t->testdir(); diff -r 27a79d3a8658 -r 156665421f83 ssl_certificate_chain.t --- a/ssl_certificate_chain.t Sun Jan 28 23:12:26 2024 +0300 +++ b/ssl_certificate_chain.t Mon Jan 29 00:34:16 2024 +0300 @@ -71,7 +71,10 @@ my $d = $t->testdir(); default_bits = 2048 encrypt_key = no distinguished_name = req_distinguished_name +x509_extensions = myca_extensions [ req_distinguished_name ] +[ myca_extensions ] +basicConstraints = critical,CA:TRUE EOF $t->write_file('ca.conf', <testdir(); default_bits = 2048 encrypt_key = no distinguished_name = req_distinguished_name +x509_extensions = myca_extensions [ req_distinguished_name ] +[ myca_extensions ] +basicConstraints = critical,CA:TRUE EOF $t->write_file('ca.conf', <write_file('ca.conf', <write_file('ca.conf', <testdir(); default_bits = 2048 encrypt_key = no distinguished_name = req_distinguished_name +x509_extensions = myca_extensions [ req_distinguished_name ] +[ myca_extensions ] +basicConstraints = critical,CA:TRUE EOF $t->write_file('ca.conf', < References: Message-ID: Hello! On Fri, Jan 26, 2024 at 04:26:00PM +0400, Sergey Kandaurov wrote: > > On 27 Nov 2023, at 06:50, Maxim Dounin wrote: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1701049758 -10800 > > # Mon Nov 27 04:49:18 2023 +0300 > > # Node ID faf0b9defc76b8683af466f8a950c2c241382970 > > # Parent a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > > Upstream: fixed usage of closed sockets with filter finalization. > > > > When filter finalization is triggered when working with an upstream server, > > and error_page redirects request processing to some simple handler, > > ngx_http_request_finalize() triggers request termination when the response > > is sent. In particular, via the upstream cleanup handler, nginx will close > > the upstream connection and the corresponding socket. > > > > Still, this can happen to be with ngx_event_pipe() on stack. While > > the code will set p->downstream_error due to NGX_ERROR returned from the > > output filter chain by filter finalization, otherwise the error will be > > ignored till control returns to ngx_http_upstream_process_request(). > > And event pipe might try reading from the (already closed) socket, resulting > > in "readv() failed (9: Bad file descriptor) while reading upstream" errors > > (or even segfaults with SSL). 
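[Editorial note with a code sketch, for context on "filter finalization" as used in this patch: a body filter that cannot process the response it is given may generate an error response instead by calling ngx_http_filter_finalize_request(), as the image filter module does in the configuration shown below. This is a rough sketch only; ngx_http_example_module and example_cannot_process() are hypothetical placeholders, not real nginx symbols.]

/* sketch of a body filter giving up on the response mid-stream */

static ngx_http_output_body_filter_pt  ngx_http_next_body_filter;

static ngx_int_t example_cannot_process(ngx_http_request_t *r,
    ngx_chain_t *in);  /* placeholder check */

static ngx_int_t
ngx_http_example_body_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    if (example_cannot_process(r, in)) {

        /*
         * Generates a new error response (subject to error_page) and
         * returns NGX_ERROR to the caller; when the body being filtered
         * comes from an upstream, this happens with ngx_event_pipe()
         * still on stack, which is the situation the patch deals with.
         */
        return ngx_http_filter_finalize_request(r,
                                        &ngx_http_example_module,
                                        NGX_HTTP_UNSUPPORTED_MEDIA_TYPE);
    }

    return ngx_http_next_body_filter(r, in);
}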
> > Such errors were seen with the following configuration:
> > 
> >     location /t2 {
> >         proxy_pass http://127.0.0.1:8080/big;
> > 
> >         image_filter_buffer 10m;
> >         image_filter resize 150 100;
> >         error_page 415 = /empty;
> >     }
> > 
> >     location /empty {
> >         return 204;
> >     }
> > 
> >     location /big {
> >         # big enough static file
> >     }
> > 
> > Fix is to set p->upstream_error in ngx_http_upstream_finalize_request(),
> > so the existing checks in ngx_event_pipe_read_upstream() will prevent
> > further reading from the closed upstream connection.
> > 
> > Similarly, p->upstream_error is now checked when handling events at
> > ngx_event_pipe() exit, as checking p->upstream->fd is not enough if
> > keepalive upstream connections are being used and the connection was
> > saved to cache during request termination.
> 
> Setting p->upstream_error in ngx_http_upstream_finalize_request()
> may look suspicious, because it used to be set on connection errors
> such as upstream timeout or recv error, or, as a recently introduced
> exception in the fastcgi module, also when the FastCGI record ends
> prematurely, before receiving all the expected content.
> But technically I think this is quite correct, because we no longer
> want to receive further data, and also (and you mention this in the
> commit log) this repeats closing an upstream connection socket in
> the same place in ngx_http_upstream_finalize_request().
> So I think it should be fine.

The biggest concern I personally see here is with the added
p->upstream_error check at ngx_event_pipe() exit.  If there is a real
upstream error, such as when the connection is reset by the upstream
server, and if we want the pipe to stay active for some time (for
example, if we want it to continue writing to the downstream
connection), there will be no ngx_handle_read_event() call.  For
level-triggered event methods this means that the read event for the
upstream connection will be generated again and again.

This shouldn't be a problem for existing ngx_event_pipe() uses though,
as p->upstream_error anyway triggers
ngx_http_upstream_finalize_request().

Still, we can consider introducing a separate flag, such as
p->upstream_closed, or clearing p->upstream, and checking these in
ngx_event_pipe() instead.  This probably would be a clearer solution.

Updated patch below:

# HG changeset patch
# User Maxim Dounin
# Date 1706510064 -10800
#      Mon Jan 29 09:34:24 2024 +0300
# Node ID 4a91a03dcd8df0652884ed6ebe9f7437ce82fd26
# Parent  7b630f6487068f7cc9dd83762fb4ea39f2f340e9
Upstream: fixed usage of closed sockets with filter finalization.

When filter finalization is triggered when working with an upstream server,
and error_page redirects request processing to some simple handler,
ngx_http_finalize_request() triggers request termination when the response
is sent.  In particular, via the upstream cleanup handler, nginx will close
the upstream connection and the corresponding socket.

Still, this can happen with ngx_event_pipe() on stack.  While the code
will set p->downstream_error due to NGX_ERROR returned from the output
filter chain by filter finalization, the error will otherwise be ignored
till control returns to ngx_http_upstream_process_request().  And the
event pipe might try reading from the (already closed) socket, resulting
in "readv() failed (9: Bad file descriptor) while reading upstream" errors
(or even segfaults with SSL).

Such errors were seen with the following configuration:

    location /t2 {
        proxy_pass http://127.0.0.1:8080/big;

        image_filter_buffer 10m;
        image_filter resize 150 100;
        error_page 415 = /empty;
    }

    location /empty {
        return 204;
    }

    location /big {
        # big enough static file
    }

Fix is to clear p->upstream in ngx_http_upstream_finalize_request(), and
ensure that p->upstream is checked in ngx_event_pipe_read_upstream() and
when handling events at ngx_event_pipe() exit.

diff --git a/src/event/ngx_event_pipe.c b/src/event/ngx_event_pipe.c
--- a/src/event/ngx_event_pipe.c
+++ b/src/event/ngx_event_pipe.c
@@ -57,7 +57,9 @@ ngx_event_pipe(ngx_event_pipe_t *p, ngx_
             do_write = 1;
         }
 
-        if (p->upstream->fd != (ngx_socket_t) -1) {
+        if (p->upstream
+            && p->upstream->fd != (ngx_socket_t) -1)
+        {
             rev = p->upstream->read;
 
             flags = (rev->eof || rev->error) ? NGX_CLOSE_EVENT : 0;
@@ -108,7 +110,9 @@ ngx_event_pipe_read_upstream(ngx_event_p
     ngx_msec_t    delay;
     ngx_chain_t  *chain, *cl, *ln;
 
-    if (p->upstream_eof || p->upstream_error || p->upstream_done) {
+    if (p->upstream_eof || p->upstream_error || p->upstream_done
+        || p->upstream == NULL)
+    {
         return NGX_OK;
     }
 
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -4561,6 +4561,10 @@ ngx_http_upstream_finalize_request(ngx_h
 
     u->peer.connection = NULL;
 
+    if (u->pipe) {
+        u->pipe->upstream = NULL;
+    }
+
     if (u->pipe && u->pipe->temp_file) {
         ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                        "http upstream temp fd: %d",

-- 
Maxim Dounin
http://mdounin.ru/

From mdounin at mdounin.ru  Mon Jan 29 07:30:54 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Mon, 29 Jan 2024 10:30:54 +0300
Subject: [PATCH 3 of 4] Silenced complaints about socket leaks on forced
 termination
In-Reply-To: <65A3D86F-FAE9-405A-990D-F1EA14D64D8E@nginx.com>
References: <61d08e4cf97cc073200e.1701053426@vm-bsd.mdounin.ru>
 <65A3D86F-FAE9-405A-990D-F1EA14D64D8E@nginx.com>
Message-ID: 

Hello!

On Fri, Jan 26, 2024 at 04:26:21PM +0400, Sergey Kandaurov wrote:

> 
> > On 27 Nov 2023, at 06:50, Maxim Dounin wrote:
> > 
> > # HG changeset patch
> > # User Maxim Dounin
> > # Date 1701049787 -10800
> > #      Mon Nov 27 04:49:47 2023 +0300
> > # Node ID 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51
> > # Parent  faf0b9defc76b8683af466f8a950c2c241382970
> > Silenced complaints about socket leaks on forced termination.
> > 
> > When graceful shutdown was requested, and then nginx was forced to
> > do fast shutdown, it used to (incorrectly) complain about open sockets
> > left in connections which weren't yet closed when fast shutdown
> > was requested.
> > 
> > Fix is to avoid complaining about open sockets when fast shutdown was
> > requested after graceful one.  Abnormal termination, if requested with
> > the WINCH signal, can still happen though.
> 
> I've been wondering about such IMHO odd behaviour and support the fix.
> There might be an opinion that once you requested graceful shutdown,
> you have to wait until it's done, but I think that requesting fast
> shutdown afterwards should be legitimate.

I tend to think that the existing behaviour might be usable in some
situations, like when one wants to look into remaining connections
after waiting for some time for graceful shutdown to complete.

Still, it is very confusing for unaware people, and I've seen lots of
reports about socket leaks which in fact aren't.  And, more importantly,
with the existing behaviour when looking at a socket leak report you
never know if it's real or not.
So it is certainly worth fixing. > > > > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > > --- a/src/os/unix/ngx_process_cycle.c > > +++ b/src/os/unix/ngx_process_cycle.c > > @@ -948,7 +948,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > > } > > } > > > > - if (ngx_exiting) { > > + if (ngx_exiting && !ngx_terminate) { > > c = cycle->connections; > > for (i = 0; i < cycle->connection_n; i++) { > > if (c[i].fd != -1 > > @@ -963,11 +963,11 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > > ngx_debug_quit = 1; > > } > > } > > + } > > > > - if (ngx_debug_quit) { > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); > > - ngx_debug_point(); > > - } > > + if (ngx_debug_quit) { > > + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); > > + ngx_debug_point(); > > } > > > > /* > > diff --git a/src/os/win32/ngx_process_cycle.c b/src/os/win32/ngx_process_cycle.c > > --- a/src/os/win32/ngx_process_cycle.c > > +++ b/src/os/win32/ngx_process_cycle.c > > @@ -834,7 +834,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc > > } > > } > > > > - if (ngx_exiting) { > > + if (ngx_exiting && !ngx_terminate) { > > c = cycle->connections; > > for (i = 0; i < cycle->connection_n; i++) { > > if (c[i].fd != (ngx_socket_t) -1 > > I think it's fine. Thanks for looking, pushed to http://mdounin.ru/hg/nginx/. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Jan 29 07:31:30 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 29 Jan 2024 10:31:30 +0300 Subject: [PATCH 4 of 4] AIO operations now add timers (ticket #2162) In-Reply-To: References: <00c3e7333145ddb5ea0e.1701053427@vm-bsd.mdounin.ru> Message-ID: Hello! On Fri, Jan 26, 2024 at 04:27:30PM +0400, Sergey Kandaurov wrote: > > > On 27 Nov 2023, at 06:50, Maxim Dounin wrote: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1701050170 -10800 > > # Mon Nov 27 04:56:10 2023 +0300 > > # Node ID 00c3e7333145ddb5ea0eeaaa66b3d9c26973c9c2 > > # Parent 61d08e4cf97cc073200ec32fc6ada9a2d48ffe51 > > AIO operations now add timers (ticket #2162). > > > > Each AIO (thread IO) operation being run is now accompanied with 1-minute > > timer. This timer prevents unexpected shutdown of the worker process while > > an AIO operation is running, and logs an alert if the operation is running > > for too long. > > > > This fixes "open socket left" alerts during worker processes shutdown > > due to pending AIO (or thread IO) operations while corresponding requests > > have no timers. In particular, such errors were observed while reading > > cache headers (ticket #2162), and with worker_shutdown_timeout. 
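[Editorial note with a code sketch: the change quoted below applies one small pattern at every place an AIO or thread task is started and completed; the fragments here are condensed from the hunks that follow, with surrounding code and error handling omitted, and the variables (r, c, ev, file) assumed from their respective contexts.]

/* when starting the AIO (thread IO) operation: guard it with a timer
 * so the worker keeps a reason to stay alive while it is in flight */

ngx_add_timer(&file->aio->event, 60000);

r->main->blocked++;
r->aio = 1;

/* in the completion event handler: */

if (ev->timedout) {
    /* the operation has been running for over a minute: log an alert
     * and return, leaving r->main->blocked set; the handler will run
     * again when the operation actually completes */
    ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                  "aio operation took too long");
    ev->timedout = 0;
    return;
}

if (ev->timer_set) {
    ngx_del_timer(ev);
}

r->main->blocked--;
r->aio = 0;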
> > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -170,6 +170,8 @@ ngx_http_copy_aio_handler(ngx_output_cha > > file->aio->data = r; > > file->aio->handler = ngx_http_copy_aio_event_handler; > > > > + ngx_add_timer(&file->aio->event, 60000); > > + > > r->main->blocked++; > > r->aio = 1; > > ctx->aio = 1; > > @@ -192,6 +194,17 @@ ngx_http_copy_aio_event_handler(ngx_even > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http aio: \"%V?%V\"", &r->uri, &r->args); > > > > + if (ev->timedout) { > > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > > + "aio operation took too long"); > > + ev->timedout = 0; > > + return; > > + } > > + > > + if (ev->timer_set) { > > + ngx_del_timer(ev); > > + } > > + > > r->main->blocked--; > > r->aio = 0; > > > > @@ -273,6 +286,8 @@ ngx_http_copy_thread_handler(ngx_thread_ > > return NGX_ERROR; > > } > > > > + ngx_add_timer(&task->event, 60000); > > + > > r->main->blocked++; > > r->aio = 1; > > > > @@ -297,6 +312,17 @@ ngx_http_copy_thread_event_handler(ngx_e > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http thread: \"%V?%V\"", &r->uri, &r->args); > > > > + if (ev->timedout) { > > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > > + "thread operation took too long"); > > + ev->timedout = 0; > > + return; > > + } > > + > > + if (ev->timer_set) { > > + ngx_del_timer(ev); > > + } > > + > > r->main->blocked--; > > r->aio = 0; > > > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > > --- a/src/http/ngx_http_file_cache.c > > +++ b/src/http/ngx_http_file_cache.c > > @@ -705,6 +705,8 @@ ngx_http_file_cache_aio_read(ngx_http_re > > c->file.aio->data = r; > > c->file.aio->handler = ngx_http_cache_aio_event_handler; > > > > + ngx_add_timer(&c->file.aio->event, 60000); > > + > > r->main->blocked++; > > r->aio = 1; > > > > @@ -752,6 +754,17 @@ ngx_http_cache_aio_event_handler(ngx_eve > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http file cache aio: \"%V?%V\"", &r->uri, &r->args); > > > > + if (ev->timedout) { > > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > > + "aio operation took too long"); > > + ev->timedout = 0; > > + return; > > + } > > + > > + if (ev->timer_set) { > > + ngx_del_timer(ev); > > + } > > + > > r->main->blocked--; > > r->aio = 0; > > > > @@ -810,6 +823,8 @@ ngx_http_cache_thread_handler(ngx_thread > > return NGX_ERROR; > > } > > > > + ngx_add_timer(&task->event, 60000); > > + > > r->main->blocked++; > > r->aio = 1; > > > > @@ -831,6 +846,17 @@ ngx_http_cache_thread_event_handler(ngx_ > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http file cache thread: \"%V?%V\"", &r->uri, &r->args); > > > > + if (ev->timedout) { > > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > > + "thread operation took too long"); > > + ev->timedout = 0; > > + return; > > + } > > + > > + if (ev->timer_set) { > > + ngx_del_timer(ev); > > + } > > + > > r->main->blocked--; > > r->aio = 0; > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3949,6 +3949,8 @@ ngx_http_upstream_thread_handler(ngx_thr > > r->aio = 1; > > p->aio = 1; > > > > + ngx_add_timer(&task->event, 60000); > > + > > return NGX_OK; > > } > > > > @@ -3967,6 +3969,17 @@ ngx_http_upstream_thread_event_handler(n > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http upstream thread: \"%V?%V\"", &r->uri, 
&r->args); > > > > + if (ev->timedout) { > > + ngx_log_error(NGX_LOG_ALERT, c->log, 0, > > + "thread operation took too long"); > > + ev->timedout = 0; > > + return; > > + } > > + > > + if (ev->timer_set) { > > + ngx_del_timer(ev); > > + } > > + > > r->main->blocked--; > > r->aio = 0; > > > > diff --git a/src/os/unix/ngx_files.c b/src/os/unix/ngx_files.c > > --- a/src/os/unix/ngx_files.c > > +++ b/src/os/unix/ngx_files.c > > @@ -110,6 +110,8 @@ ngx_thread_read(ngx_file_t *file, u_char > > return NGX_ERROR; > > } > > > > + task->event.log = file->log; > > + > > file->thread_task = task; > > } > > > > @@ -493,6 +495,8 @@ ngx_thread_write_chain_to_file(ngx_file_ > > return NGX_ERROR; > > } > > > > + task->event.log = file->log; > > + > > file->thread_task = task; > > } > > > > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > > --- a/src/os/unix/ngx_linux_sendfile_chain.c > > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > > @@ -332,6 +332,7 @@ ngx_linux_sendfile_thread(ngx_connection > > return NGX_ERROR; > > } > > > > + task->event.log = c->log; > > task->handler = ngx_linux_sendfile_thread_handler; > > > > c->sendfile_task = task; > > Looks good to me. Thanks for looking, pushed to http://mdounin.ru/hg/nginx/. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Jan 29 07:58:09 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 29 Jan 2024 10:58:09 +0300 Subject: [PATCH 1 of 4] Fixed request termination with AIO and subrequests (ticket #2555) In-Reply-To: <20240126120230.45y2unpnlbzll4ru@N00W24XTQX> References: <20240126120230.45y2unpnlbzll4ru@N00W24XTQX> Message-ID: Hello! On Fri, Jan 26, 2024 at 04:02:30PM +0400, Roman Arutyunyan wrote: > On Mon, Nov 27, 2023 at 05:50:24AM +0300, Maxim Dounin wrote: > > # HG changeset patch > > # User Maxim Dounin > > # Date 1701049682 -10800 > > # Mon Nov 27 04:48:02 2023 +0300 > > # Node ID a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > > Fixed request termination with AIO and subrequests (ticket #2555). > > > > When a request was terminated due to an error via ngx_http_terminate_request() > > while an AIO operation was running in a subrequest, various issues were > > observed. This happened because ngx_http_request_finalizer() was only set > > in the subrequest where ngx_http_terminate_request() was called, but not > > in the subrequest where the AIO operation was running. After completion > > of the AIO operation resumed normal processing of the subrequest, leading > > to issues. > > Something is wrong with the last sentence. Thanks, rewritten as: ... After completion of the AIO operation normal processing of the subrequest was resumed, leading to issues. > > In particular, in case of the upstream module, termination of the request > > called upstream cleanup, which closed the upstream connection. Attempts to > > further work with the upstream connection after AIO operation completion > > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > > descriptor) while reading upstream" errors, or socket leaks. > > Can you elaborate on socket leaks? For example, consider a request which is waiting for additional data from the upstream, so the only timer is the read timer for the upstream connection, and which is terminated because the client closed the connection. Request termination will remove the only timer. Still, the client connection is not yet closed by nginx. 
So as long as the request is not actually freed following completion of the AIO operation, this is a socket leak: we have no timers left, and no further events expected. And this can easily happen if neither segfault nor readv() error was triggered (for example, if p->upstream->read->ready was not set during AIO operation completion). > > In ticket #2555, issues were observed with the following configuration > > with cache background update (with thread writing instrumented to > > introduce a delay, when a client closes the connection during an update): > > > > location = /background-and-aio-write { > > proxy_pass ... > > proxy_cache one; > > proxy_cache_valid 200 1s; > > proxy_cache_background_update on; > > proxy_cache_use_stale updating; > > aio threads; > > aio_write on; > > limit_rate 1000; > > } > > > > Similarly, the same issue can be seen with SSI, and can be caused by > > errors in subrequests, such as in the following configuration > > (were "/proxy" uses AIO, and "/sleep" returns 444 after some delay, > > s/were/where/ ? Fixed, thanks. > > causing request termination): > > > > location = /ssi-active-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > '; > > limit_rate 1000; > > } > > > > Or the same with both AIO operation and the error in non-active subrequests > > (which needs slightly different handling, see below): > > > > location = /ssi-non-active-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > > > '; > > limit_rate 1000; > > } > > > > Similarly, issues can be observed with just static files. However, > > with static files potential impact is limited due to timeout safeguards > > in ngx_http_writer(), and the fact that c->error is set during request > > termination. > > In a simple configuration with an AIO operation in the active subrequest, > > such as in the following configuration, the connection is closed right > > after completion of the AIO operation anyway, since ngx_http_writer() > > tries to write to the connection and fails due to c->error set: > > > > location = /ssi-active-static-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > '; > > limit_rate 1000; > > } > > > > In the following configuration, with an AIO operation in a non-active > > subrequest, the connection is closed only after send_timeout expires: > > > > location = /ssi-non-active-static-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > > > '; > > limit_rate 1000; > > } > > > > Fix is to introduce r->main->terminated flag, which is to be checked > > by AIO event handlers when the r->main->blocked counter is decremented. > > When the flag is set, handlers are expected to wake up the connection > > instead of the subrequest (which might be already cleaned up). > > > > Additionally, now ngx_http_request_finalizer() is always set in the > > active subrequest, so waking up the connection properly finalizes the > > request even if termination happened in a non-active subrequest. > > The issue does not seem to be significant for static file. In fact, the > biggest problem is trying to use a resource after it was freed by an > ngx_http_cleanup_add()-registered handler, as opposed to ngx_pool_cleanup_add() > handlers which are safer, but serve a slightly different purpose. > > As for non-ngx_http_cleanup_add() related code (like static files), the effect > of the issue is just a possible delay of the connection closure until output is > produced, in which case typically ngx_http_write_filter() triggers the closure. 
> So the patch basically fixes a time delay (usually limited by a timeout). > IMO there's no need to go in so many details about that. The issue with static files demonstrates that the issue goes beyond use of cleanup handlers. As such, just skipping cleanup handlers with mr->blocked is wrong (see below). Also, it shows that the safeguard timeouts we use in ngx_http_writer() are actually useful, and save us from bigger problems here. In the past, there were attempts to remove these timers as long as an AIO operation is running. And the details provide ways to reproduce different aspects of the issue (and were actually used during testing and development of the patch). > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -195,9 +195,18 @@ ngx_http_copy_aio_event_handler(ngx_even > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > > > - ngx_http_run_posted_requests(c); > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > @@ -305,11 +314,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > > > #endif > > > > - if (r->done) { > > + if (r->done || r->main->terminated) { > > /* > > * trigger connection event handler if the subrequest was > > - * already finalized; this can happen if the handler is used > > - * for sendfile() in threads > > + * already finalized (this can happen if the handler is used > > + * for sendfile() in threads), or if the request was terminated > > */ > > > > c->write->handler(c->write); > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > > --- a/src/http/ngx_http_file_cache.c > > +++ b/src/http/ngx_http_file_cache.c > > @@ -14,7 +14,7 @@ > > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > > static void > > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > > { > > + ngx_int_t rc; > > ngx_connection_t *c; > > ngx_http_request_t *r; > > > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > > > - ngx_http_file_cache_lock_wait(r, r->cache); > > - > > - ngx_http_run_posted_requests(c); > > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > > + > > + if (rc == NGX_AGAIN) { > > + return; > > + } > > + > > + r->cache->waiting = 0; > > + r->main->blocked--; > > + > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > BTW, cache lock is not a real aio. It's just a regular event timer. 
> And it's deleted in ngx_http_file_cache_free() which is called from > ngx_http_upstream_finalize_request(). So it looks like the "terminated" > flag will never be 1 here. The upstream cleanup handler is installed after checking the cache (and waiting for the cache lock, if needed). And that's, basically, why the code uses r->blocked in the first place. While the code can be rewritten to not depend on the r->blocked flag and so checking r->main->terminated won't be needed as well, this is not currently the case. > > > -static void > > +static ngx_int_t > > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > > { > > ngx_uint_t wait; > > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > > "cache lock timeout"); > > c->lock_timeout = 0; > > - goto wakeup; > > + return NGX_OK; > > } > > > > cache = c->file_cache; > > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > > if (wait) { > > ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer); > > - return; > > + return NGX_AGAIN; > > } > > > > -wakeup: > > - > > - c->waiting = 0; > > - r->main->blocked--; > > - r->write_event_handler(r); > > + return NGX_OK; > > } > > > > > > @@ -740,9 +755,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > - > > - ngx_http_run_posted_requests(c); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > @@ -810,9 +834,18 @@ ngx_http_cache_thread_event_handler(ngx_ > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > - > > - ngx_http_run_posted_requests(c); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > > --- a/src/http/ngx_http_request.c > > +++ b/src/http/ngx_http_request.c > > @@ -2681,6 +2681,8 @@ ngx_http_terminate_request(ngx_http_requ > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > > "http terminate request count:%d", mr->count); > > > > + mr->terminated = 1; > > Another solution could be skipping the cleanup handlers below if mr->blocked > is set. This would fix the crash, but would not fix the delay though. Exactly. While skipping cleanup handlers will fix a particular crash in the upstream module, the same issue observed with static files clearly demonstrates that this is a wrong approach. Further, skipping cleanup handlers means that we will continue doing unneeded (and potentially dangerous, given that request termination can be due to a fatal error, such as a memory allocation error) work we can otherwise cancel early by calling the cleanup handler. > > > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > > mr->headers_out.status = rc; > > } > > @@ -2703,8 +2705,13 @@ ngx_http_terminate_request(ngx_http_requ > > if (mr->write_event_handler) { > > > > if (mr->blocked) { > > + if (r != r->connection->data) { > > + r = r->connection->data; > > + } > > Why not simply r = r->connection->data. 
Or maybe a new variable > ar (active request) similar to mr (main request) would make sense. When writing this, I've decided that using conditional assignment better explains the idea that we need the active subrequest, and therefore switch to it if "r" is not active. Otherwise, just assignment is equivalent. Changed to just "r = r->connection->data;", as I have no strong preference here (and we already do "r = r->main;" in several places). The new variable looks like an overkill though. > > > + > > r->connection->error = 1; > > r->write_event_handler = ngx_http_request_finalizer; > > + > > return; > > } > > > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > > --- a/src/http/ngx_http_request.h > > +++ b/src/http/ngx_http_request.h > > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > > unsigned root_tested:1; > > unsigned done:1; > > unsigned logged:1; > > + unsigned terminated:1; > > > > unsigned buffered:4; > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3984,11 +3984,11 @@ ngx_http_upstream_thread_event_handler(n > > > > #endif > > > > - if (r->done) { > > + if (r->done || r->main->terminated) { > > /* > > * trigger connection event handler if the subrequest was > > - * already finalized; this can happen if the handler is used > > - * for sendfile() in threads > > + * already finalized (this can happen if the handler is used > > + * for sendfile() in threads), or if the request was terminated > > */ > > > > c->write->handler(c->write); > > The patch is generally ok. Just in case, here is the updated patch: # HG changeset patch # User Maxim Dounin # Date 1706513520 -10800 # Mon Jan 29 10:32:00 2024 +0300 # Node ID 35bfb011f69bb97cf853b379fbdcfd5052d0e3ed # Parent e88cdaa0f1ffc9af3144770c72ee5baf07b2562e Fixed request termination with AIO and subrequests (ticket #2555). When a request was terminated due to an error via ngx_http_terminate_request() while an AIO operation was running in a subrequest, various issues were observed. This happened because ngx_http_request_finalizer() was only set in the subrequest where ngx_http_terminate_request() was called, but not in the subrequest where the AIO operation was running. After completion of the AIO operation normal processing of the subrequest was resumed, leading to issues. In particular, in case of the upstream module, termination of the request called upstream cleanup, which closed the upstream connection. Attempts to further work with the upstream connection after AIO operation completion resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file descriptor) while reading upstream" errors, or socket leaks. In ticket #2555, issues were observed with the following configuration with cache background update (with thread writing instrumented to introduce a delay, when a client closes the connection during an update): location = /background-and-aio-write { proxy_pass ... 
proxy_cache one; proxy_cache_valid 200 1s; proxy_cache_background_update on; proxy_cache_use_stale updating; aio threads; aio_write on; limit_rate 1000; } Similarly, the same issue can be seen with SSI, and can be caused by errors in subrequests, such as in the following configuration (where "/proxy" uses AIO, and "/sleep" returns 444 after some delay, causing request termination): location = /ssi-active-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Or the same with both AIO operation and the error in non-active subrequests (which needs slightly different handling, see below): location = /ssi-non-active-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Similarly, issues can be observed with just static files. However, with static files potential impact is limited due to timeout safeguards in ngx_http_writer(), and the fact that c->error is set during request termination. In a simple configuration with an AIO operation in the active subrequest, such as in the following configuration, the connection is closed right after completion of the AIO operation anyway, since ngx_http_writer() tries to write to the connection and fails due to c->error set: location = /ssi-active-static-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } In the following configuration, with an AIO operation in a non-active subrequest, the connection is closed only after send_timeout expires: location = /ssi-non-active-static-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Fix is to introduce r->main->terminated flag, which is to be checked by AIO event handlers when the r->main->blocked counter is decremented. When the flag is set, handlers are expected to wake up the connection instead of the subrequest (which might be already cleaned up). Additionally, now ngx_http_request_finalizer() is always set in the active subrequest, so waking up the connection properly finalizes the request even if termination happened in a non-active subrequest. 
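(The single-quoted response bodies in the SSI configurations above are empty in the archive: SSI include directives look like HTML comments and were evidently stripped on the way. Judging from the surrounding description, each body contained include directives along the following lines; this is a hypothetical reconstruction, the URIs and their order are guesses:

    return 200 '
        <!--#include virtual="/proxy" -->
        <!--#include virtual="/sleep" -->
    ';

with the non-active variants presumably adding one more include ahead of the failing one, so that the error happens in a non-active subrequest.)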
diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -208,9 +208,18 @@ ngx_http_copy_aio_event_handler(ngx_even r->main->blocked--; r->aio = 0; - r->write_event_handler(r); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ - ngx_http_run_posted_requests(c); + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif @@ -331,11 +340,11 @@ ngx_http_copy_thread_event_handler(ngx_e #endif - if (r->done) { + if (r->done || r->main->terminated) { /* * trigger connection event handler if the subrequest was - * already finalized; this can happen if the handler is used - * for sendfile() in threads + * already finalized (this can happen if the handler is used + * for sendfile() in threads), or if the request was terminated */ c->write->handler(c->write); diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c +++ b/src/http/ngx_http_file_cache.c @@ -14,7 +14,7 @@ static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, ngx_http_cache_t *c); static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c); static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, ngx_http_cache_t *c); @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) { + ngx_int_t rc; ngx_connection_t *c; ngx_http_request_t *r; @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http file cache wait: \"%V?%V\"", &r->uri, &r->args); - ngx_http_file_cache_lock_wait(r, r->cache); - - ngx_http_run_posted_requests(c); + rc = ngx_http_file_cache_lock_wait(r, r->cache); + + if (rc == NGX_AGAIN) { + return; + } + + r->cache->waiting = 0; + r->main->blocked--; + + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } -static void +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) { ngx_uint_t wait; @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "cache lock timeout"); c->lock_timeout = 0; - goto wakeup; + return NGX_OK; } cache = c->file_cache; @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r if (wait) { ngx_add_timer(&c->wait_event, (timer > 500) ? 
500 : timer); - return; + return NGX_AGAIN; } -wakeup: - - c->waiting = 0; - r->main->blocked--; - r->write_event_handler(r); + return NGX_OK; } @@ -753,9 +768,18 @@ ngx_http_cache_aio_event_handler(ngx_eve r->main->blocked--; r->aio = 0; - r->write_event_handler(r); - - ngx_http_run_posted_requests(c); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif @@ -836,9 +860,18 @@ ngx_http_cache_thread_event_handler(ngx_ r->main->blocked--; r->aio = 0; - r->write_event_handler(r); - - ngx_http_run_posted_requests(c); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -2694,6 +2694,8 @@ ngx_http_terminate_request(ngx_http_requ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http terminate request count:%d", mr->count); + mr->terminated = 1; + if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { mr->headers_out.status = rc; } @@ -2716,8 +2718,11 @@ ngx_http_terminate_request(ngx_http_requ if (mr->write_event_handler) { if (mr->blocked) { + r = r->connection->data; + r->connection->error = 1; r->write_event_handler = ngx_http_request_finalizer; + return; } diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h +++ b/src/http/ngx_http_request.h @@ -550,6 +550,7 @@ struct ngx_http_request_s { unsigned root_tested:1; unsigned done:1; unsigned logged:1; + unsigned terminated:1; unsigned buffered:4; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3997,11 +3997,11 @@ ngx_http_upstream_thread_event_handler(n #endif - if (r->done) { + if (r->done || r->main->terminated) { /* * trigger connection event handler if the subrequest was - * already finalized; this can happen if the handler is used - * for sendfile() in threads + * already finalized (this can happen if the handler is used + * for sendfile() in threads), or if the request was terminated */ c->write->handler(c->write); -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Jan 29 08:12:17 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 29 Jan 2024 11:12:17 +0300 Subject: Nginx-tests stream_ssl_conf_command.t test hanging indefinitely In-Reply-To: References: Message-ID: Hello! On Fri, Jan 26, 2024 at 01:14:56AM +0000, Mayerhofer, Austin via nginx-devel wrote: > Hey Maxim, > > Thanks, I installed homebrew’s Perl and all these tests are > passing now, woohoo! > > However a few others are failing now including ssl_ocsp.t and > ssl_verify_depth.t, failing 13/17 and 3/11 tests respectively > with the same error: > > ``` > # Failed test 'verify depth 2 - end' > # at ssl_verify_depth.t line 169. > # 'HTTP/1.1 400 Bad Request > # Server: nginx/1.24.0 > # Date: Fri, 26 Jan 2024 01:08:10 GMT > # Content-Type: text/html > # Content-Length: 215 > # Connection: close > # X-Client: CN=end > # X-Verify: FAILED:unsuitable certificate purpose [...] 
Should be fixed with this patch: https://mailman.nginx.org/pipermail/nginx-devel/2024-January/TSFBB5DWWQKXKDTGVLSH5VWJYMRCMCV4.html Thanks for reporting this. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Mon Jan 29 13:21:43 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 29 Jan 2024 17:21:43 +0400 Subject: [PATCH 2 of 4] Upstream: fixed usage of closed sockets with filter finalization In-Reply-To: References: Message-ID: <3AC7E113-5762-4A69-B854-492ED0DE3002@nginx.com> > On 29 Jan 2024, at 10:43, Maxim Dounin wrote: > > Hello! > > On Fri, Jan 26, 2024 at 04:26:00PM +0400, Sergey Kandaurov wrote: > >>> On 27 Nov 2023, at 06:50, Maxim Dounin wrote: >>> >>> # HG changeset patch >>> # User Maxim Dounin >>> # Date 1701049758 -10800 >>> # Mon Nov 27 04:49:18 2023 +0300 >>> # Node ID faf0b9defc76b8683af466f8a950c2c241382970 >>> # Parent a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b >>> Upstream: fixed usage of closed sockets with filter finalization. >>> >>> When filter finalization is triggered when working with an upstream server, >>> and error_page redirects request processing to some simple handler, >>> ngx_http_request_finalize() triggers request termination when the response >>> is sent. In particular, via the upstream cleanup handler, nginx will close >>> the upstream connection and the corresponding socket. >>> >>> Still, this can happen to be with ngx_event_pipe() on stack. While >>> the code will set p->downstream_error due to NGX_ERROR returned from the >>> output filter chain by filter finalization, otherwise the error will be >>> ignored till control returns to ngx_http_upstream_process_request(). >>> And event pipe might try reading from the (already closed) socket, resulting >>> in "readv() failed (9: Bad file descriptor) while reading upstream" errors >>> (or even segfaults with SSL). >>> >>> Such errors were seen with the following configuration: >>> >>> location /t2 { >>> proxy_pass http://127.0.0.1:8080/big; >>> >>> image_filter_buffer 10m; >>> image_filter resize 150 100; >>> error_page 415 = /empty; >>> } >>> >>> location /empty { >>> return 204; >>> } >>> >>> location /big { >>> # big enough static file >>> } >>> >>> Fix is to set p->upstream_error in ngx_http_upstream_finalize_request(), >>> so the existing checks in ngx_event_pipe_read_upstream() will prevent >>> further reading from the closed upstream connection. >>> >>> Similarly, p->upstream_error is now checked when handling events at >>> ngx_event_pipe() exit, as checking p->upstream->fd is not enough if >>> keepalive upstream connections are being used and the connection was >>> saved to cache during request termination. >>> >> >> Setting p->upstream_error in ngx_http_upstream_finalize_request() >> may look suspicious, because it is used to be set on connection errors >> such as upstream timeout or recv error, or, as a recently introduced >> exception in the fastcgi module, - also when the FastCGI record ends >> prematurely, before receiving all the expected content. >> But technically I think this is quite correct, because we no longer >> want to receive further data, and also (and you mention this in the >> commit log) this repeats closing an upstream connection socket in >> the same place in ngx_http_upstream_finalize_request(). >> So I think it should be fine. > > The biggest concern I personally see here is with the added > p->upstream_error check at ngx_event_pipe() exit. 
If there is a > real upstream error, such as when the connection is reset by the > upstream server, and if we want the pipe to be active for some > time (for example, if we want it to continue writing to the > downstream connection), there will be no ngx_handle_read_event() > call. For level-triggered event methods this means that the read > event for the upstream connection will be generated again and > again. > > This shouldn't be the problem for existing ngx_event_pipe() uses > though, as p->upstream_error is anyway triggers > ngx_http_upstream_finalize_request(). > > Still, we can consider introducing a separate flag, such as > p->upstream_closed, or clearing p->upstream, and checking these in > ngx_event_pipe() instead. This probably would be a more clear > solution. > > Updated patch below: > > # HG changeset patch > # User Maxim Dounin > # Date 1706510064 -10800 > # Mon Jan 29 09:34:24 2024 +0300 > # Node ID 4a91a03dcd8df0652884ed6ebe9f7437ce82fd26 > # Parent 7b630f6487068f7cc9dd83762fb4ea39f2f340e9 > Upstream: fixed usage of closed sockets with filter finalization. > > When filter finalization is triggered when working with an upstream server, > and error_page redirects request processing to some simple handler, > ngx_http_request_finalize() triggers request termination when the response > is sent. In particular, via the upstream cleanup handler, nginx will close > the upstream connection and the corresponding socket. > > Still, this can happen to be with ngx_event_pipe() on stack. While > the code will set p->downstream_error due to NGX_ERROR returned from the > output filter chain by filter finalization, otherwise the error will be > ignored till control returns to ngx_http_upstream_process_request(). > And event pipe might try reading from the (already closed) socket, resulting > in "readv() failed (9: Bad file descriptor) while reading upstream" errors > (or even segfaults with SSL). > > Such errors were seen with the following configuration: > > location /t2 { > proxy_pass http://127.0.0.1:8080/big; > > image_filter_buffer 10m; > image_filter resize 150 100; > error_page 415 = /empty; > } > > location /empty { > return 204; > } > > location /big { > # big enough static file > } > > Fix is to clear p->upstream in ngx_http_upstream_finalize_request(), > and ensure that p->upstream is checked in ngx_event_pipe_read_upstream() > and when handling events at ngx_event_pipe() exit. > > diff --git a/src/event/ngx_event_pipe.c b/src/event/ngx_event_pipe.c > --- a/src/event/ngx_event_pipe.c > +++ b/src/event/ngx_event_pipe.c > @@ -57,7 +57,9 @@ ngx_event_pipe(ngx_event_pipe_t *p, ngx_ > do_write = 1; > } > > - if (p->upstream->fd != (ngx_socket_t) -1) { > + if (p->upstream > + && p->upstream->fd != (ngx_socket_t) -1) > + { > rev = p->upstream->read; > > flags = (rev->eof || rev->error) ? 
NGX_CLOSE_EVENT : 0; > @@ -108,7 +110,9 @@ ngx_event_pipe_read_upstream(ngx_event_p > ngx_msec_t delay; > ngx_chain_t *chain, *cl, *ln; > > - if (p->upstream_eof || p->upstream_error || p->upstream_done) { > + if (p->upstream_eof || p->upstream_error || p->upstream_done > + || p->upstream == NULL) > + { > return NGX_OK; > } > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -4561,6 +4561,10 @@ ngx_http_upstream_finalize_request(ngx_h > > u->peer.connection = NULL; > > + if (u->pipe) { > + u->pipe->upstream = NULL; > + } > + > if (u->pipe && u->pipe->temp_file) { > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "http upstream temp fd: %d", > Indeed, this fix looks more isolated, I like it. -- Sergey Kandaurov From pluknet at nginx.com Mon Jan 29 13:23:15 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 29 Jan 2024 17:23:15 +0400 Subject: nginx-tests SSL tests failing out of the box? In-Reply-To: References: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> Message-ID: > On 29 Jan 2024, at 07:24, Maxim Dounin wrote: > > Hello! > > On Sat, Jan 27, 2024 at 07:19:45AM +0300, Maxim Dounin wrote: > >> Hello! >> >> On Fri, Jan 26, 2024 at 09:29:58PM +0400, Sergey Kandaurov wrote: >> >>> On Thu, Jan 25, 2024 at 11:38:57PM +0300, Maxim Dounin wrote: >>>> Hello! >>>> >>>> On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote: >>>> >>>>> Hi all, >>>>> >>>>> I have not made any changes to NGINX. Vanilla NGINX (./configure with no flags) passes all tests that run, but when compiling with SSL, not all SSL tests are passing. Is this expected, or do I need to configure nginx further aside from adding the --with-http_ssl_module flag? Do each of the failing tests below require separate fixes, or is there a one-size-fits-all solution for all of them? >>>>> >>>>> OS: MacOS 12.6.3 >>>>> Chip: Apple M1 Max >>>>> NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module >>>>> Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 >>>>> OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) >>>>> Perl: 5.30.3 (/usr/bin/perl) >>>>> >>>>> When I run >>>>> >>>>> ``` >>>>> TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t >>>>> ``` >>>>> >>>>> I see >>>>> >>>>> ``` >>>>> not ok 2 - session reused >>>>> >>>>> # Failed test 'session reused' >>>>> # at ssl.t line 187. >>>>> # 'HTTP/1.1 200 OK >>>>> # Server: nginx/1.24.0 >>>>> # Date: Thu, 25 Jan 2024 18:50:10 GMT >>>>> # Content-Type: text/plain >>>>> # Content-Length: 6 >>>>> # Connection: close >>>>> # >>>>> # body .' >>>>> # doesn't match '(?^m:^body r$)' >>>>> ``` >>>> >>>> [...] >>>> >>>> It looks like SSL session reuse is broken in Perl you are >>>> using. This might be the case if, for example, Net::SSLeay in >>>> your installation was compiled with system LibreSSL as an SSL >>>> library - at least on the server side LibreSSL simply does not >>>> support session reuse with TLSv1.3. >>>> >>>> Test suite checks if nginx was compiled with LibreSSL and marks >>>> appropriate tests as TODO, but if the Perl module is broken >>>> instead, the test will fail. 
>>>> >>> >>> Well, technically, we could test this and skip appropriately: >>> >>> diff --git a/ssl_session_reuse.t b/ssl_session_reuse.t >>> --- a/ssl_session_reuse.t >>> +++ b/ssl_session_reuse.t >>> @@ -166,7 +166,9 @@ local $TODO = 'no TLSv1.3 sessions, old >>> local $TODO = 'no TLSv1.3 sessions, old IO::Socket::SSL' >>> if $IO::Socket::SSL::VERSION < 2.061 && test_tls13(); >>> local $TODO = 'no TLSv1.3 sessions in LibreSSL' >>> - if $t->has_module('LibreSSL') && test_tls13(); >>> + if ($t->has_module('LibreSSL') >>> + || Net::SSLeay::constant("LIBRESSL_VERSION_NUMBER")) >>> + && test_tls13(); >>> >>> is(test_reuse(8443), 1, 'tickets reused'); >>> is(test_reuse(8444), 1, 'tickets and cache reused'); >>> >>> But I see little to no purpose: if the testing tool is broken >>> in various unexpected ways (another example is X509_V_ERR_INVALID_PURPOSE >>> in peer certificate verification as reported in the adjacent thread), >>> I think we barely can handle this in general. >> >> I generally agree. >> >> Still, the X509_V_ERR_INVALID_PURPOSE seems to be an OpenSSL >> 3.2.0-related issue: for tests using CA root certificates without >> CA:TRUE it now generates X509_V_ERR_INVALID_CA on the root >> certificate, which then changed to X509_V_ERR_INVALID_PURPOSE. >> >> Given the list of incompatible changes from NEWS.md, and the fact >> that the same tests work fine with OpenSSL 3.2.0 but with >> "openssl" binary from older versions, it seems to be this: >> >> * The `x509`, `ca`, and `req` apps now always produce X.509v3 certificates. >> >> This needs to be addressed. > > Patch: > > # HG changeset patch > # User Maxim Dounin > # Date 1706477656 -10800 > # Mon Jan 29 00:34:16 2024 +0300 > # Node ID 156665421f83a054cf331e8f9a27dd4d2f86114d > # Parent 27a79d3a8658794d7c0f8c246bcd92a9861da468 > Tests: compatibility with "openssl" app from OpenSSL 3.2.0. > > OpenSSL 3.2.0's "openssl" app generates X.509v3 certificates unless explicitly > asked not to. Such certificates, even self-signed ones, cannot be used to sign > other certificates without CA:TRUE explicitly set in the basicConstraints > extension. As a result, tests doing so are now failing. > > Fix is to provide basicConstraints with CA:TRUE for self-signed root > certificates used in "openssl ca" calls. > Looks good. [..] -- Sergey Kandaurov From arut at nginx.com Mon Jan 29 14:07:58 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 29 Jan 2024 18:07:58 +0400 Subject: [PATCH 1 of 4] Fixed request termination with AIO and subrequests (ticket #2555) In-Reply-To: References: <20240126120230.45y2unpnlbzll4ru@N00W24XTQX> Message-ID: <20240129140758.3dnvlrjzypfxokxm@N00W24XTQX> Hi, On Mon, Jan 29, 2024 at 10:58:09AM +0300, Maxim Dounin wrote: > Hello! > > On Fri, Jan 26, 2024 at 04:02:30PM +0400, Roman Arutyunyan wrote: > > > On Mon, Nov 27, 2023 at 05:50:24AM +0300, Maxim Dounin wrote: > > > # HG changeset patch > > > # User Maxim Dounin > > > # Date 1701049682 -10800 > > > # Mon Nov 27 04:48:02 2023 +0300 > > > # Node ID a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > > > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > > > Fixed request termination with AIO and subrequests (ticket #2555). > > > > > > When a request was terminated due to an error via ngx_http_terminate_request() > > > while an AIO operation was running in a subrequest, various issues were > > > observed. 
This happened because ngx_http_request_finalizer() was only set > > > in the subrequest where ngx_http_terminate_request() was called, but not > > > in the subrequest where the AIO operation was running. After completion > > > of the AIO operation resumed normal processing of the subrequest, leading > > > to issues. > > > > Something is wrong with the last sentence. > > Thanks, rewritten as: > > ... After completion > of the AIO operation normal processing of the subrequest was resumed, leading > to issues. > > > > In particular, in case of the upstream module, termination of the request > > > called upstream cleanup, which closed the upstream connection. Attempts to > > > further work with the upstream connection after AIO operation completion > > > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > > > descriptor) while reading upstream" errors, or socket leaks. > > > > Can you elaborate on socket leaks? > > For example, consider a request which is waiting for additional > data from the upstream, so the only timer is the read timer for > the upstream connection, and which is terminated because the > client closed the connection. Request termination will remove the > only timer. Still, the client connection is not yet closed by > nginx. So as long as the request is not actually freed following > completion of the AIO operation, this is a socket leak: we have no > timers left, and no further events expected. And this can easily > happen if neither segfault nor readv() error was triggered (for > example, if p->upstream->read->ready was not set during AIO > operation completion). > > > > In ticket #2555, issues were observed with the following configuration > > > with cache background update (with thread writing instrumented to > > > introduce a delay, when a client closes the connection during an update): > > > > > > location = /background-and-aio-write { > > > proxy_pass ... > > > proxy_cache one; > > > proxy_cache_valid 200 1s; > > > proxy_cache_background_update on; > > > proxy_cache_use_stale updating; > > > aio threads; > > > aio_write on; > > > limit_rate 1000; > > > } > > > > > > Similarly, the same issue can be seen with SSI, and can be caused by > > > errors in subrequests, such as in the following configuration > > > (were "/proxy" uses AIO, and "/sleep" returns 444 after some delay, > > > > s/were/where/ ? > > Fixed, thanks. > > > > causing request termination): > > > > > > location = /ssi-active-boom { > > > ssi on; > > > ssi_types *; > > > return 200 ' > > > > > > > > > '; > > > limit_rate 1000; > > > } > > > > > > Or the same with both AIO operation and the error in non-active subrequests > > > (which needs slightly different handling, see below): > > > > > > location = /ssi-non-active-boom { > > > ssi on; > > > ssi_types *; > > > return 200 ' > > > > > > > > > > > > '; > > > limit_rate 1000; > > > } > > > > > > Similarly, issues can be observed with just static files. However, > > > with static files potential impact is limited due to timeout safeguards > > > in ngx_http_writer(), and the fact that c->error is set during request > > > termination. 
> > > In a simple configuration with an AIO operation in the active subrequest, > > > such as in the following configuration, the connection is closed right > > > after completion of the AIO operation anyway, since ngx_http_writer() > > > tries to write to the connection and fails due to c->error set: > > > > > > location = /ssi-active-static-boom { > > > ssi on; > > > ssi_types *; > > > return 200 ' > > > > > > > > > '; > > > limit_rate 1000; > > > } > > > > > > In the following configuration, with an AIO operation in a non-active > > > subrequest, the connection is closed only after send_timeout expires: > > > > > > location = /ssi-non-active-static-boom { > > > ssi on; > > > ssi_types *; > > > return 200 ' > > > > > > > > > > > > '; > > > limit_rate 1000; > > > } > > > > > > Fix is to introduce r->main->terminated flag, which is to be checked > > > by AIO event handlers when the r->main->blocked counter is decremented. > > > When the flag is set, handlers are expected to wake up the connection > > > instead of the subrequest (which might be already cleaned up). > > > > > > Additionally, now ngx_http_request_finalizer() is always set in the > > > active subrequest, so waking up the connection properly finalizes the > > > request even if termination happened in a non-active subrequest. > > > > The issue does not seem to be significant for static file. In fact, the > > biggest problem is trying to use a resource after it was freed by an > > ngx_http_cleanup_add()-registered handler, as opposed to ngx_pool_cleanup_add() > > handlers which are safer, but serve a slightly different purpose. > > > > As for non-ngx_http_cleanup_add() related code (like static files), the effect > > of the issue is just a possible delay of the connection closure until output is > > produced, in which case typically ngx_http_write_filter() triggers the closure. > > So the patch basically fixes a time delay (usually limited by a timeout). > > IMO there's no need to go in so many details about that. > > The issue with static files demonstrates that the issue goes > beyond use of cleanup handlers. As such, just skipping cleanup > handlers with mr->blocked is wrong (see below). > > Also, it shows that the safeguard timeouts we use in > ngx_http_writer() are actually useful, and save us from bigger > problems here. In the past, there were attempts to remove these > timers as long as an AIO operation is running. > > And the details provide ways to reproduce different aspects of the > issue (and were actually used during testing and development of > the patch). 
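To summarize the pattern for readers following the thread: every completion handler that decrements the r->main->blocked counter now wakes up either the connection or the subrequest, depending on whether the request was terminated in the meantime. A condensed sketch, assembled from the diffs in this thread rather than quoted from any single hunk (the cache lock wait path is the same, except that it never touches r->aio):

    r->main->blocked--;
    r->aio = 0;

    if (r->main->terminated) {
        /* the request was terminated while the operation was in flight;
         * the subrequest may already be cleaned up, so trigger the
         * connection event handler instead */
        c->write->handler(c->write);

    } else {
        /* normal path: resume processing of the subrequest */
        r->write_event_handler(r);
        ngx_http_run_posted_requests(c);
    }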
> > > > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > > --- a/src/http/ngx_http_copy_filter_module.c > > > +++ b/src/http/ngx_http_copy_filter_module.c > > > @@ -195,9 +195,18 @@ ngx_http_copy_aio_event_handler(ngx_even > > > r->main->blocked--; > > > r->aio = 0; > > > > > > - r->write_event_handler(r); > > > + if (r->main->terminated) { > > > + /* > > > + * trigger connection event handler if the request was > > > + * terminated > > > + */ > > > > > > - ngx_http_run_posted_requests(c); > > > + c->write->handler(c->write); > > > + > > > + } else { > > > + r->write_event_handler(r); > > > + ngx_http_run_posted_requests(c); > > > + } > > > } > > > > > > #endif > > > @@ -305,11 +314,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > > > > > #endif > > > > > > - if (r->done) { > > > + if (r->done || r->main->terminated) { > > > /* > > > * trigger connection event handler if the subrequest was > > > - * already finalized; this can happen if the handler is used > > > - * for sendfile() in threads > > > + * already finalized (this can happen if the handler is used > > > + * for sendfile() in threads), or if the request was terminated > > > */ > > > > > > c->write->handler(c->write); > > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > > > --- a/src/http/ngx_http_file_cache.c > > > +++ b/src/http/ngx_http_file_cache.c > > > @@ -14,7 +14,7 @@ > > > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > > > ngx_http_cache_t *c); > > > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > > > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > > ngx_http_cache_t *c); > > > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > > > ngx_http_cache_t *c); > > > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > > > static void > > > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > > > { > > > + ngx_int_t rc; > > > ngx_connection_t *c; > > > ngx_http_request_t *r; > > > > > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > > > > > - ngx_http_file_cache_lock_wait(r, r->cache); > > > - > > > - ngx_http_run_posted_requests(c); > > > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > > > + > > > + if (rc == NGX_AGAIN) { > > > + return; > > > + } > > > + > > > + r->cache->waiting = 0; > > > + r->main->blocked--; > > > + > > > + if (r->main->terminated) { > > > + /* > > > + * trigger connection event handler if the request was > > > + * terminated > > > + */ > > > + > > > + c->write->handler(c->write); > > > + > > > + } else { > > > + r->write_event_handler(r); > > > + ngx_http_run_posted_requests(c); > > > + } > > > } > > > > BTW, cache lock is not a real aio. It's just a regular event timer. > > And it's deleted in ngx_http_file_cache_free() which is called from > > ngx_http_upstream_finalize_request(). So it looks like the "terminated" > > flag will never be 1 here. > > The upstream cleanup handler is installed after checking the > cache (and waiting for the cache lock, if needed). And that's, > basically, why the code uses r->blocked in the first place. > > While the code can be rewritten to not depend on the r->blocked > flag and so checking r->main->terminated won't be needed as well, > this is not currently the case. 
> > > > > > -static void > > > +static ngx_int_t > > > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > > > { > > > ngx_uint_t wait; > > > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > > > "cache lock timeout"); > > > c->lock_timeout = 0; > > > - goto wakeup; > > > + return NGX_OK; > > > } > > > > > > cache = c->file_cache; > > > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > > > > if (wait) { > > > ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer); > > > - return; > > > + return NGX_AGAIN; > > > } > > > > > > -wakeup: > > > - > > > - c->waiting = 0; > > > - r->main->blocked--; > > > - r->write_event_handler(r); > > > + return NGX_OK; > > > } > > > > > > > > > @@ -740,9 +755,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > > > r->main->blocked--; > > > r->aio = 0; > > > > > > - r->write_event_handler(r); > > > - > > > - ngx_http_run_posted_requests(c); > > > + if (r->main->terminated) { > > > + /* > > > + * trigger connection event handler if the request was > > > + * terminated > > > + */ > > > + > > > + c->write->handler(c->write); > > > + > > > + } else { > > > + r->write_event_handler(r); > > > + ngx_http_run_posted_requests(c); > > > + } > > > } > > > > > > #endif > > > @@ -810,9 +834,18 @@ ngx_http_cache_thread_event_handler(ngx_ > > > r->main->blocked--; > > > r->aio = 0; > > > > > > - r->write_event_handler(r); > > > - > > > - ngx_http_run_posted_requests(c); > > > + if (r->main->terminated) { > > > + /* > > > + * trigger connection event handler if the request was > > > + * terminated > > > + */ > > > + > > > + c->write->handler(c->write); > > > + > > > + } else { > > > + r->write_event_handler(r); > > > + ngx_http_run_posted_requests(c); > > > + } > > > } > > > > > > #endif > > > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > > > --- a/src/http/ngx_http_request.c > > > +++ b/src/http/ngx_http_request.c > > > @@ -2681,6 +2681,8 @@ ngx_http_terminate_request(ngx_http_requ > > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > > > "http terminate request count:%d", mr->count); > > > > > > + mr->terminated = 1; > > > > Another solution could be skipping the cleanup handlers below if mr->blocked > > is set. This would fix the crash, but would not fix the delay though. > > Exactly. While skipping cleanup handlers will fix a particular > crash in the upstream module, the same issue observed with static > files clearly demonstrates that this is a wrong approach. > > Further, skipping cleanup handlers means that we will continue > doing unneeded (and potentially dangerous, given that request > termination can be due to a fatal error, such as a memory > allocation error) work we can otherwise cancel early by calling > the cleanup handler. > > > > > > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > > > mr->headers_out.status = rc; > > > } > > > @@ -2703,8 +2705,13 @@ ngx_http_terminate_request(ngx_http_requ > > > if (mr->write_event_handler) { > > > > > > if (mr->blocked) { > > > + if (r != r->connection->data) { > > > + r = r->connection->data; > > > + } > > > > Why not simply r = r->connection->data. Or maybe a new variable > > ar (active request) similar to mr (main request) would make sense. > > When writing this, I've decided that using conditional assignment > better explains the idea that we need the active subrequest, and > therefore switch to it if "r" is not active. 
Otherwise, just > assignment is equivalent. > > Changed to just "r = r->connection->data;", as I have no strong > preference here (and we already do "r = r->main;" in several > places). > > The new variable looks like an overkill though. > > > > > > + > > > r->connection->error = 1; > > > r->write_event_handler = ngx_http_request_finalizer; > > > + > > > return; > > > } > > > > > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > > > --- a/src/http/ngx_http_request.h > > > +++ b/src/http/ngx_http_request.h > > > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > > > unsigned root_tested:1; > > > unsigned done:1; > > > unsigned logged:1; > > > + unsigned terminated:1; > > > > > > unsigned buffered:4; > > > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > > --- a/src/http/ngx_http_upstream.c > > > +++ b/src/http/ngx_http_upstream.c > > > @@ -3984,11 +3984,11 @@ ngx_http_upstream_thread_event_handler(n > > > > > > #endif > > > > > > - if (r->done) { > > > + if (r->done || r->main->terminated) { > > > /* > > > * trigger connection event handler if the subrequest was > > > - * already finalized; this can happen if the handler is used > > > - * for sendfile() in threads > > > + * already finalized (this can happen if the handler is used > > > + * for sendfile() in threads), or if the request was terminated > > > */ > > > > > > c->write->handler(c->write); > > > > The patch is generally ok. > > Just in case, here is the updated patch: > > # HG changeset patch > # User Maxim Dounin > # Date 1706513520 -10800 > # Mon Jan 29 10:32:00 2024 +0300 > # Node ID 35bfb011f69bb97cf853b379fbdcfd5052d0e3ed > # Parent e88cdaa0f1ffc9af3144770c72ee5baf07b2562e > Fixed request termination with AIO and subrequests (ticket #2555). > > When a request was terminated due to an error via ngx_http_terminate_request() > while an AIO operation was running in a subrequest, various issues were > observed. This happened because ngx_http_request_finalizer() was only set > in the subrequest where ngx_http_terminate_request() was called, but not > in the subrequest where the AIO operation was running. After completion > of the AIO operation normal processing of the subrequest was resumed, leading > to issues. > > In particular, in case of the upstream module, termination of the request > called upstream cleanup, which closed the upstream connection. Attempts to > further work with the upstream connection after AIO operation completion > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > descriptor) while reading upstream" errors, or socket leaks. > > In ticket #2555, issues were observed with the following configuration > with cache background update (with thread writing instrumented to > introduce a delay, when a client closes the connection during an update): > > location = /background-and-aio-write { > proxy_pass ... 
> proxy_cache one; > proxy_cache_valid 200 1s; > proxy_cache_background_update on; > proxy_cache_use_stale updating; > aio threads; > aio_write on; > limit_rate 1000; > } > > Similarly, the same issue can be seen with SSI, and can be caused by > errors in subrequests, such as in the following configuration > (where "/proxy" uses AIO, and "/sleep" returns 444 after some delay, > causing request termination): > > location = /ssi-active-boom { > ssi on; > ssi_types *; > return 200 ' > > > '; > limit_rate 1000; > } > > Or the same with both AIO operation and the error in non-active subrequests > (which needs slightly different handling, see below): > > location = /ssi-non-active-boom { > ssi on; > ssi_types *; > return 200 ' > > > > '; > limit_rate 1000; > } > > Similarly, issues can be observed with just static files. However, > with static files potential impact is limited due to timeout safeguards > in ngx_http_writer(), and the fact that c->error is set during request > termination. > > In a simple configuration with an AIO operation in the active subrequest, > such as in the following configuration, the connection is closed right > after completion of the AIO operation anyway, since ngx_http_writer() > tries to write to the connection and fails due to c->error set: > > location = /ssi-active-static-boom { > ssi on; > ssi_types *; > return 200 ' > > > '; > limit_rate 1000; > } > > In the following configuration, with an AIO operation in a non-active > subrequest, the connection is closed only after send_timeout expires: > > location = /ssi-non-active-static-boom { > ssi on; > ssi_types *; > return 200 ' > > > > '; > limit_rate 1000; > } > > Fix is to introduce r->main->terminated flag, which is to be checked > by AIO event handlers when the r->main->blocked counter is decremented. > When the flag is set, handlers are expected to wake up the connection > instead of the subrequest (which might be already cleaned up). > > Additionally, now ngx_http_request_finalizer() is always set in the > active subrequest, so waking up the connection properly finalizes the > request even if termination happened in a non-active subrequest. 
> > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -208,9 +208,18 @@ ngx_http_copy_aio_event_handler(ngx_even > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > > - ngx_http_run_posted_requests(c); > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > @@ -331,11 +340,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > #endif > > - if (r->done) { > + if (r->done || r->main->terminated) { > /* > * trigger connection event handler if the subrequest was > - * already finalized; this can happen if the handler is used > - * for sendfile() in threads > + * already finalized (this can happen if the handler is used > + * for sendfile() in threads), or if the request was terminated > */ > > c->write->handler(c->write); > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > --- a/src/http/ngx_http_file_cache.c > +++ b/src/http/ngx_http_file_cache.c > @@ -14,7 +14,7 @@ > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > ngx_http_cache_t *c); > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > ngx_http_cache_t *c); > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > ngx_http_cache_t *c); > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > static void > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > { > + ngx_int_t rc; > ngx_connection_t *c; > ngx_http_request_t *r; > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > - ngx_http_file_cache_lock_wait(r, r->cache); > - > - ngx_http_run_posted_requests(c); > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > + > + if (rc == NGX_AGAIN) { > + return; > + } > + > + r->cache->waiting = 0; > + r->main->blocked--; > + > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > > -static void > +static ngx_int_t > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > { > ngx_uint_t wait; > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > "cache lock timeout"); > c->lock_timeout = 0; > - goto wakeup; > + return NGX_OK; > } > > cache = c->file_cache; > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > if (wait) { > ngx_add_timer(&c->wait_event, (timer > 500) ? 
500 : timer); > - return; > + return NGX_AGAIN; > } > > -wakeup: > - > - c->waiting = 0; > - r->main->blocked--; > - r->write_event_handler(r); > + return NGX_OK; > } > > > @@ -753,9 +768,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > - > - ngx_http_run_posted_requests(c); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > @@ -836,9 +860,18 @@ ngx_http_cache_thread_event_handler(ngx_ > r->main->blocked--; > r->aio = 0; > > - r->write_event_handler(r); > - > - ngx_http_run_posted_requests(c); > + if (r->main->terminated) { > + /* > + * trigger connection event handler if the request was > + * terminated > + */ > + > + c->write->handler(c->write); > + > + } else { > + r->write_event_handler(r); > + ngx_http_run_posted_requests(c); > + } > } > > #endif > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > --- a/src/http/ngx_http_request.c > +++ b/src/http/ngx_http_request.c > @@ -2694,6 +2694,8 @@ ngx_http_terminate_request(ngx_http_requ > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "http terminate request count:%d", mr->count); > > + mr->terminated = 1; > + > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > mr->headers_out.status = rc; > } > @@ -2716,8 +2718,11 @@ ngx_http_terminate_request(ngx_http_requ > if (mr->write_event_handler) { > > if (mr->blocked) { > + r = r->connection->data; > + > r->connection->error = 1; > r->write_event_handler = ngx_http_request_finalizer; > + > return; > } > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > --- a/src/http/ngx_http_request.h > +++ b/src/http/ngx_http_request.h > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > unsigned root_tested:1; > unsigned done:1; > unsigned logged:1; > + unsigned terminated:1; > > unsigned buffered:4; > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3997,11 +3997,11 @@ ngx_http_upstream_thread_event_handler(n > > #endif > > - if (r->done) { > + if (r->done || r->main->terminated) { > /* > * trigger connection event handler if the subrequest was > - * already finalized; this can happen if the handler is used > - * for sendfile() in threads > + * already finalized (this can happen if the handler is used > + * for sendfile() in threads), or if the request was terminated > */ > > c->write->handler(c->write); > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks ok From v.zhestikov at f5.com Mon Jan 29 16:59:30 2024 From: v.zhestikov at f5.com (=?utf-8?q?Vadim_Zhestikov?=) Date: Mon, 29 Jan 2024 16:59:30 +0000 Subject: [njs] Fixed parsing of for-statement. Message-ID: details: https://hg.nginx.org/njs/rev/08d7391230be branches: changeset: 2274:08d7391230be user: Vadim Zhestikov date: Mon Jan 29 08:57:48 2024 -0800 description: Fixed parsing of for-statement. 
diffstat:

 src/njs_parser.c         |  37 +++++--------------------------------
 src/test/njs_unit_test.c |   3 +++
 2 files changed, 8 insertions(+), 32 deletions(-)

diffs (81 lines):

diff -r 6770c015efdc -r 08d7391230be src/njs_parser.c
--- a/src/njs_parser.c	Thu Jan 25 14:48:05 2024 -0800
+++ b/src/njs_parser.c	Mon Jan 29 08:57:48 2024 -0800
@@ -5491,23 +5491,9 @@ njs_parser_iteration_statement_for(njs_p
 
 
 static njs_int_t
-njs_parser_for_var_in_of_expression_chk_fail(njs_parser_t *parser,
-    njs_lexer_token_t *token, njs_queue_link_t *current)
-{
-    if (parser->ret != NJS_OK) {
-        return njs_parser_failed(parser);
-    }
-
-    return njs_parser_for_var_in_of_expression(parser, token, current);
-}
-
-
-static njs_int_t
 njs_parser_for_expression_map_reparse(njs_parser_t *parser,
     njs_lexer_token_t *token, njs_queue_link_t *current)
 {
-    njs_str_t  *text;
-
     if (parser->ret != NJS_OK && parser->node != NULL) {
         return njs_parser_failed(parser);
     }
@@ -5517,20 +5503,7 @@ njs_parser_for_expression_map_reparse(nj
 
         njs_parser_next(parser, njs_parser_expression);
 
-        /*
-         * Here we pass not a node, but a token, this is important.
-         * This is necessary for correct error output.
-         */
-
-        text = njs_mp_alloc(parser->vm->mem_pool, sizeof(njs_str_t));
-        if (text == NULL) {
-            return NJS_ERROR;
-        }
-
-        *text = token->text;
-
-        return njs_parser_after(parser, current, text, 0,
-                                njs_parser_for_var_in_of_expression_chk_fail);
+        return NJS_OK;
     }
 
     return njs_parser_stack_pop(parser);
@@ -5793,14 +5766,14 @@ njs_parser_iteration_statement_for_map(n
 
         *text = token->text;
 
-        ret = njs_parser_after(parser, current, text, 1,
-                               njs_parser_for_expression_map_continue);
+        ret = njs_parser_after(parser, current, text, 0,
+                               njs_parser_for_expression_map_reparse);
        if (ret != NJS_OK) {
            return NJS_ERROR;
        }
 
-        return njs_parser_after(parser, current, text, 0,
-                                njs_parser_for_expression_map_reparse);
+        return njs_parser_after(parser, current, text, 1,
+                                njs_parser_for_expression_map_continue);
     }
 
 expression_after:
diff -r 6770c015efdc -r 08d7391230be src/test/njs_unit_test.c
--- a/src/test/njs_unit_test.c	Thu Jan 25 14:48:05 2024 -0800
+++ b/src/test/njs_unit_test.c	Mon Jan 29 08:57:48 2024 -0800
@@ -2975,6 +2975,9 @@ static njs_unit_test_t  njs_test[] =
     { njs_str("for(var``>0; 0 ;) ;"),
       njs_str("SyntaxError: Unexpected token \"`\" in 1") },
 
+    { njs_str("for(1;;)for(-x;;)fr({-x;;)f"),
+      njs_str("SyntaxError: Unexpected token \"-\" in 1") },
+
     { njs_str("for(i;;)for(-new+3;;)break;"),
       njs_str("SyntaxError: Unexpected token \"+\" in 1") },

From v.zhestikov at f5.com  Mon Jan 29 16:59:32 2024
From: v.zhestikov at f5.com (Vadim Zhestikov)
Date: Mon, 29 Jan 2024 16:59:32 +0000
Subject: [njs] Fixed parsing of hex, oct and binary literals with no digits.
Message-ID: 

details:   https://hg.nginx.org/njs/rev/a2959e490279
branches:  
changeset: 2275:a2959e490279
user:      Vadim Zhestikov
date:      Mon Jan 29 08:57:49 2024 -0800
description:
Fixed parsing of hex, oct and binary literals with no digits.
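In short, a radix prefix that is not followed by at least one valid digit is now rejected as a whole, so the reported token points at the prefix itself. Illustrative inputs, taken from the test expectations in the diff below:

    0X_ff    // before: SyntaxError: Unexpected token "_ff" in 1
             // after:  SyntaxError: Unexpected token "0X" in 1

    0b + 1   // after:  SyntaxError: Unexpected token "0b" in 1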
diffstat:

 src/njs_lexer.c          |   6 +++---
 src/test/njs_unit_test.c |  15 ++++++++++++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diffs (69 lines):

diff -r 08d7391230be -r a2959e490279 src/njs_lexer.c
--- a/src/njs_lexer.c	Mon Jan 29 08:57:48 2024 -0800
+++ b/src/njs_lexer.c	Mon Jan 29 08:57:49 2024 -0800
@@ -915,7 +915,7 @@ njs_lexer_number(njs_lexer_t *lexer, njs
         if (*p == 'x' || *p == 'X') {
             p++;
 
-            if (p == lexer->end) {
+            if (p == lexer->end || njs_char_to_hex(*p) < 0) {
                 goto illegal_token;
             }
 
@@ -929,7 +929,7 @@ njs_lexer_number(njs_lexer_t *lexer, njs
         if (*p == 'o' || *p == 'O') {
             p++;
 
-            if (p == lexer->end) {
+            if (p == lexer->end || (u_char)(*p - '0') > 7) {
                 goto illegal_token;
             }
 
@@ -947,7 +947,7 @@ njs_lexer_number(njs_lexer_t *lexer, njs
         if (*p == 'b' || *p == 'B') {
             p++;
 
-            if (p == lexer->end) {
+            if (p == lexer->end || (u_char)(*p - '0') > 1) {
                 goto illegal_token;
             }
 
diff -r 08d7391230be -r a2959e490279 src/test/njs_unit_test.c
--- a/src/test/njs_unit_test.c	Mon Jan 29 08:57:48 2024 -0800
+++ b/src/test/njs_unit_test.c	Mon Jan 29 08:57:49 2024 -0800
@@ -353,7 +353,10 @@ static njs_unit_test_t  njs_test[] =
       njs_str("SyntaxError: Unexpected token \"0O778\" in 1") },
 
     { njs_str("0O_7"),
-      njs_str("SyntaxError: Unexpected token \"_7\" in 1") },
+      njs_str("SyntaxError: Unexpected token \"0O\" in 1") },
+
+    { njs_str("0O + 1"),
+      njs_str("SyntaxError: Unexpected token \"0O\" in 1") },
 
     { njs_str("0o7_"),
       njs_str("SyntaxError: Unexpected token \"_\" in 1") },
@@ -408,7 +411,10 @@ static njs_unit_test_t  njs_test[] =
       njs_str("SyntaxError: Unexpected token \"0B12\" in 1") },
 
     { njs_str("0b_11"),
-      njs_str("SyntaxError: Unexpected token \"_11\" in 1") },
+      njs_str("SyntaxError: Unexpected token \"0b\" in 1") },
+
+    { njs_str("0b + 1"),
+      njs_str("SyntaxError: Unexpected token \"0b\" in 1") },
 
     { njs_str("0B1__1"),
       njs_str("SyntaxError: Unexpected token \"__1\" in 1") },
@@ -446,7 +452,10 @@ static njs_unit_test_t  njs_test[] =
       njs_str("SyntaxError: Unexpected token \"g\" in 1") },
 
     { njs_str("0X_ff"),
-      njs_str("SyntaxError: Unexpected token \"_ff\" in 1") },
+      njs_str("SyntaxError: Unexpected token \"0X\" in 1") },
+
+    { njs_str("0X + 1"),
+      njs_str("SyntaxError: Unexpected token \"0X\" in 1") },
 
     { njs_str("0xff_"),
       njs_str("SyntaxError: Unexpected token \"_\" in 1") },

From mdounin at mdounin.ru  Tue Jan 30 00:18:41 2024
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Tue, 30 Jan 2024 03:18:41 +0300
Subject: nginx-tests SSL tests failing out of the box?
In-Reply-To: 
References: <20240126172958.qqcmvtqo7rws7iwe@Y9MQ9X2QVV> 
Message-ID: 

Hello!

On Mon, Jan 29, 2024 at 05:23:15PM +0400, Sergey Kandaurov wrote:

> 
> On 29 Jan 2024, at 07:24, Maxim Dounin wrote:
> > 
> > Hello!
> > 
> > On Sat, Jan 27, 2024 at 07:19:45AM +0300, Maxim Dounin wrote:
> > 
> >> Hello!
> >> 
> >> On Fri, Jan 26, 2024 at 09:29:58PM +0400, Sergey Kandaurov wrote:
> >> 
> >>> On Thu, Jan 25, 2024 at 11:38:57PM +0300, Maxim Dounin wrote:
> >>>> Hello!
> >>>> 
> >>>> On Thu, Jan 25, 2024 at 06:59:36PM +0000, Mayerhofer, Austin via nginx-devel wrote:
> >>>> 
> >>>>> Hi all,
> >>>>> 
> >>>>> I have not made any changes to NGINX. Vanilla NGINX (./configure
> >>>>> with no flags) passes all tests that run, but when compiling with
> >>>>> SSL, not all SSL tests are passing. Is this expected, or do I need
> >>>>> to configure nginx further aside from adding the
> >>>>> --with-http_ssl_module flag? Do each of the failing tests below
> >>>>> require separate fixes, or is there a one-size-fits-all solution
> >>>>> for all of them?
> >>>>> > >>>>> OS: MacOS 12.6.3 > >>>>> Chip: Apple M1 Max > >>>>> NGINX: 1.24.0 built from source code with ./configure --with-debug --with-http_ssl_module > >>>>> Nginx-tests: https://github.com/nginx/nginx-tests/tree/4c2ad8093952706f327d04887c5546bad91b75a6 > >>>>> OpenSSL: 3.2.0 (/opt/homebrew/bin/openssl) > >>>>> Perl: 5.30.3 (/usr/bin/perl) > >>>>> > >>>>> When I run > >>>>> > >>>>> ``` > >>>>> TEST_NGINX_BINARY=/usr/local/nginx/sbin/nginx prove -v ssl.t > >>>>> ``` > >>>>> > >>>>> I see > >>>>> > >>>>> ``` > >>>>> not ok 2 - session reused > >>>>> > >>>>> # Failed test 'session reused' > >>>>> # at ssl.t line 187. > >>>>> # 'HTTP/1.1 200 OK > >>>>> # Server: nginx/1.24.0 > >>>>> # Date: Thu, 25 Jan 2024 18:50:10 GMT > >>>>> # Content-Type: text/plain > >>>>> # Content-Length: 6 > >>>>> # Connection: close > >>>>> # > >>>>> # body .' > >>>>> # doesn't match '(?^m:^body r$)' > >>>>> ``` > >>>> > >>>> [...] > >>>> > >>>> It looks like SSL session reuse is broken in Perl you are > >>>> using. This might be the case if, for example, Net::SSLeay in > >>>> your installation was compiled with system LibreSSL as an SSL > >>>> library - at least on the server side LibreSSL simply does not > >>>> support session reuse with TLSv1.3. > >>>> > >>>> Test suite checks if nginx was compiled with LibreSSL and marks > >>>> appropriate tests as TODO, but if the Perl module is broken > >>>> instead, the test will fail. > >>>> > >>> > >>> Well, technically, we could test this and skip appropriately: > >>> > >>> diff --git a/ssl_session_reuse.t b/ssl_session_reuse.t > >>> --- a/ssl_session_reuse.t > >>> +++ b/ssl_session_reuse.t > >>> @@ -166,7 +166,9 @@ local $TODO = 'no TLSv1.3 sessions, old > >>> local $TODO = 'no TLSv1.3 sessions, old IO::Socket::SSL' > >>> if $IO::Socket::SSL::VERSION < 2.061 && test_tls13(); > >>> local $TODO = 'no TLSv1.3 sessions in LibreSSL' > >>> - if $t->has_module('LibreSSL') && test_tls13(); > >>> + if ($t->has_module('LibreSSL') > >>> + || Net::SSLeay::constant("LIBRESSL_VERSION_NUMBER")) > >>> + && test_tls13(); > >>> > >>> is(test_reuse(8443), 1, 'tickets reused'); > >>> is(test_reuse(8444), 1, 'tickets and cache reused'); > >>> > >>> But I see little to no purpose: if the testing tool is broken > >>> in various unexpected ways (another example is X509_V_ERR_INVALID_PURPOSE > >>> in peer certificate verification as reported in the adjacent thread), > >>> I think we barely can handle this in general. > >> > >> I generally agree. > >> > >> Still, the X509_V_ERR_INVALID_PURPOSE seems to be an OpenSSL > >> 3.2.0-related issue: for tests using CA root certificates without > >> CA:TRUE it now generates X509_V_ERR_INVALID_CA on the root > >> certificate, which then changed to X509_V_ERR_INVALID_PURPOSE. > >> > >> Given the list of incompatible changes from NEWS.md, and the fact > >> that the same tests work fine with OpenSSL 3.2.0 but with > >> "openssl" binary from older versions, it seems to be this: > >> > >> * The `x509`, `ca`, and `req` apps now always produce X.509v3 certificates. > >> > >> This needs to be addressed. > > > > Patch: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1706477656 -10800 > > # Mon Jan 29 00:34:16 2024 +0300 > > # Node ID 156665421f83a054cf331e8f9a27dd4d2f86114d > > # Parent 27a79d3a8658794d7c0f8c246bcd92a9861da468 > > Tests: compatibility with "openssl" app from OpenSSL 3.2.0. > > > > OpenSSL 3.2.0's "openssl" app generates X.509v3 certificates unless explicitly > > asked not to. 
Such certificates, even self-signed ones, cannot be used to sign > > other certificates without CA:TRUE explicitly set in the basicConstraints > > extension. As a result, tests doing so are now failing. > > > > Fix is to provide basicConstraints with CA:TRUE for self-signed root > > certificates used in "openssl ca" calls. > > > > Looks good. Pushed, thanks for looking. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Tue Jan 30 01:17:26 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 30 Jan 2024 01:17:26 +0000 Subject: [njs] Test262: handling the mkdir issue with a host file system. Message-ID: details: https://hg.nginx.org/njs/rev/9e2a757cb33e branches: changeset: 2276:9e2a757cb33e user: Dmitry Volyntsev date: Mon Jan 29 17:16:01 2024 -0800 description: Test262: handling the mkdir issue with a host file system. diffstat: test/fs/promises_05.t.js | 13 ++++++++++++- 1 files changed, 12 insertions(+), 1 deletions(-) diffs (23 lines): diff -r a2959e490279 -r 9e2a757cb33e test/fs/promises_05.t.js --- a/test/fs/promises_05.t.js Mon Jan 29 08:57:49 2024 -0800 +++ b/test/fs/promises_05.t.js Mon Jan 29 17:16:01 2024 -0800 @@ -49,7 +49,18 @@ var testSync = () => new Promise((resolv try { fs.writeFileSync(fname(dname_utf8), fname(dname_utf8)); - throw new Error('fs.mkdirSync error 1'); + const mode = fs.statSync(fname(dname_utf8)).mode & 0o777; + + if (mode == 0o555) { + /* + * Some file systems ignore the mode parameter for mkdir. + * For example: a shared folder on a MacOS host mounted + * to a Linux guest via Parallels Desktop. + */ + throw new Error('fs.writeFileSync did not respect mode'); + } + + fs.unlinkSync(fname(dname_utf8)); } catch (e) { if (e.syscall != 'open' || e.code != 'EACCES') { From xeioex at nginx.com Tue Jan 30 01:17:27 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 30 Jan 2024 01:17:27 +0000 Subject: [njs] 2024 year. Message-ID: details: https://hg.nginx.org/njs/rev/478795e3296b branches: changeset: 2277:478795e3296b user: Dmitry Volyntsev date: Mon Jan 29 17:16:07 2024 -0800 description: 2024 year. diffstat: LICENSE | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diffs (16 lines): diff -r 9e2a757cb33e -r 478795e3296b LICENSE --- a/LICENSE Mon Jan 29 17:16:01 2024 -0800 +++ b/LICENSE Mon Jan 29 17:16:07 2024 -0800 @@ -1,9 +1,9 @@ /* - * Copyright (C) 2015-2023 NGINX, Inc. + * Copyright (C) 2015-2024 NGINX, Inc. * Copyright (C) 2015-2021 Igor Sysoev - * Copyright (C) 2017-2023 Dmitry Volyntsev + * Copyright (C) 2017-2024 Dmitry Volyntsev * Copyright (C) 2019-2022 Alexander Borisov - * Copyright (C) 2022-2023 Vadim Zhestikov + * Copyright (C) 2022-2024 Vadim Zhestikov * All rights reserved. * * Redistribution and use in source and binary forms, with or without From xeioex at nginx.com Tue Jan 30 01:17:29 2024 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 30 Jan 2024 01:17:29 +0000 Subject: [njs] Style. Message-ID: details: https://hg.nginx.org/njs/rev/fca50ba4db9d branches: changeset: 2278:fca50ba4db9d user: Dmitry Volyntsev date: Mon Jan 29 17:16:08 2024 -0800 description: Style. 
diffstat: src/njs_builtin.c | 2 +- src/njs_clang.h | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diffs (48 lines): diff -r 478795e3296b -r fca50ba4db9d src/njs_builtin.c --- a/src/njs_builtin.c Mon Jan 29 17:16:07 2024 -0800 +++ b/src/njs_builtin.c Mon Jan 29 17:16:08 2024 -0800 @@ -28,7 +28,7 @@ static njs_int_t njs_global_this_prop_ha static njs_arr_t *njs_vm_expression_completions(njs_vm_t *vm, njs_str_t *expression); static njs_arr_t *njs_vm_global_var_completions(njs_vm_t *vm, - njs_str_t *expression); + njs_str_t *expression); static njs_arr_t *njs_object_completions(njs_vm_t *vm, njs_value_t *object, njs_str_t *expression); static njs_int_t njs_env_hash_init(njs_vm_t *vm, njs_lvlhsh_t *hash, diff -r 478795e3296b -r fca50ba4db9d src/njs_clang.h --- a/src/njs_clang.h Mon Jan 29 17:16:07 2024 -0800 +++ b/src/njs_clang.h Mon Jan 29 17:16:08 2024 -0800 @@ -193,19 +193,19 @@ njs_leading_zeros64(uint64_t x) njs_inline NJS_NOSANITIZE("float-cast-overflow") int64_t njs_unsafe_cast_double_to_int64(double num) { - /* - * Casting NaN to integer is undefined behavior, - * but it is fine in some cases where we do additional checks later. - * For example: - * int64_t i64 = njs_unsafe_cast_double_to_int64(num); - * if (i64 == num) { - * // num is integer - * } - * - * We do this as inline function to avoid UndefinedBehaviorSanitizer - * warnings. - */ - return (int64_t) num; + /* + * Casting NaN to integer is undefined behavior, + * but it is fine in some cases where we do additional checks later. + * For example: + * int64_t i64 = njs_unsafe_cast_double_to_int64(num); + * if (i64 == num) { + * // num is integer + * } + * + * We do this as inline function to avoid UndefinedBehaviorSanitizer + * warnings. + */ + return (int64_t) num; } From mdounin at mdounin.ru Tue Jan 30 03:07:19 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Jan 2024 06:07:19 +0300 Subject: [PATCH 1 of 4] Fixed request termination with AIO and subrequests (ticket #2555) In-Reply-To: <20240129140758.3dnvlrjzypfxokxm@N00W24XTQX> References: <20240126120230.45y2unpnlbzll4ru@N00W24XTQX> <20240129140758.3dnvlrjzypfxokxm@N00W24XTQX> Message-ID: Hello! On Mon, Jan 29, 2024 at 06:07:58PM +0400, Roman Arutyunyan wrote: > Hi, > > On Mon, Jan 29, 2024 at 10:58:09AM +0300, Maxim Dounin wrote: > > Hello! > > > > On Fri, Jan 26, 2024 at 04:02:30PM +0400, Roman Arutyunyan wrote: > > > > > On Mon, Nov 27, 2023 at 05:50:24AM +0300, Maxim Dounin wrote: > > > > # HG changeset patch > > > > # User Maxim Dounin > > > > # Date 1701049682 -10800 > > > > # Mon Nov 27 04:48:02 2023 +0300 > > > > # Node ID a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > > > > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > > > > Fixed request termination with AIO and subrequests (ticket #2555). > > > > > > > > When a request was terminated due to an error via ngx_http_terminate_request() > > > > while an AIO operation was running in a subrequest, various issues were > > > > observed. This happened because ngx_http_request_finalizer() was only set > > > > in the subrequest where ngx_http_terminate_request() was called, but not > > > > in the subrequest where the AIO operation was running. After completion > > > > of the AIO operation resumed normal processing of the subrequest, leading > > > > to issues. > > > > > > Something is wrong with the last sentence. > > > > Thanks, rewritten as: > > > > ... After completion > > of the AIO operation normal processing of the subrequest was resumed, leading > > to issues. 
> > > > > > In particular, in case of the upstream module, termination of the request > > > > called upstream cleanup, which closed the upstream connection. Attempts to > > > > further work with the upstream connection after AIO operation completion > > > > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > > > > descriptor) while reading upstream" errors, or socket leaks. > > > > > > Can you elaborate on socket leaks? > > > > For example, consider a request which is waiting for additional > > data from the upstream, so the only timer is the read timer for > > the upstream connection, and which is terminated because the > > client closed the connection. Request termination will remove the > > only timer. Still, the client connection is not yet closed by > > nginx. So as long as the request is not actually freed following > > completion of the AIO operation, this is a socket leak: we have no > > timers left, and no further events expected. And this can easily > > happen if neither segfault nor readv() error was triggered (for > > example, if p->upstream->read->ready was not set during AIO > > operation completion). > > > > > > In ticket #2555, issues were observed with the following configuration > > > > with cache background update (with thread writing instrumented to > > > > introduce a delay, when a client closes the connection during an update): > > > > > > > > location = /background-and-aio-write { > > > > proxy_pass ... > > > > proxy_cache one; > > > > proxy_cache_valid 200 1s; > > > > proxy_cache_background_update on; > > > > proxy_cache_use_stale updating; > > > > aio threads; > > > > aio_write on; > > > > limit_rate 1000; > > > > } > > > > > > > > Similarly, the same issue can be seen with SSI, and can be caused by > > > > errors in subrequests, such as in the following configuration > > > > (were "/proxy" uses AIO, and "/sleep" returns 444 after some delay, > > > > > > s/were/where/ ? > > > > Fixed, thanks. > > > > > > causing request termination): > > > > > > > > location = /ssi-active-boom { > > > > ssi on; > > > > ssi_types *; > > > > return 200 ' > > > > > > > > > > > > '; > > > > limit_rate 1000; > > > > } > > > > > > > > Or the same with both AIO operation and the error in non-active subrequests > > > > (which needs slightly different handling, see below): > > > > > > > > location = /ssi-non-active-boom { > > > > ssi on; > > > > ssi_types *; > > > > return 200 ' > > > > > > > > > > > > > > > > '; > > > > limit_rate 1000; > > > > } > > > > > > > > Similarly, issues can be observed with just static files. However, > > > > with static files potential impact is limited due to timeout safeguards > > > > in ngx_http_writer(), and the fact that c->error is set during request > > > > termination. 
> > > > In a simple configuration with an AIO operation in the active subrequest, > > > > such as in the following configuration, the connection is closed right > > > > after completion of the AIO operation anyway, since ngx_http_writer() > > > > tries to write to the connection and fails due to c->error set: > > > > > > > > location = /ssi-active-static-boom { > > > > ssi on; > > > > ssi_types *; > > > > return 200 ' > > > > > > > > > > > > '; > > > > limit_rate 1000; > > > > } > > > > > > > > In the following configuration, with an AIO operation in a non-active > > > > subrequest, the connection is closed only after send_timeout expires: > > > > > > > > location = /ssi-non-active-static-boom { > > > > ssi on; > > > > ssi_types *; > > > > return 200 ' > > > > > > > > > > > > > > > > '; > > > > limit_rate 1000; > > > > } > > > > > > > > Fix is to introduce r->main->terminated flag, which is to be checked > > > > by AIO event handlers when the r->main->blocked counter is decremented. > > > > When the flag is set, handlers are expected to wake up the connection > > > > instead of the subrequest (which might be already cleaned up). > > > > > > > > Additionally, now ngx_http_request_finalizer() is always set in the > > > > active subrequest, so waking up the connection properly finalizes the > > > > request even if termination happened in a non-active subrequest. > > > > > > The issue does not seem to be significant for static file. In fact, the > > > biggest problem is trying to use a resource after it was freed by an > > > ngx_http_cleanup_add()-registered handler, as opposed to ngx_pool_cleanup_add() > > > handlers which are safer, but serve a slightly different purpose. > > > > > > As for non-ngx_http_cleanup_add() related code (like static files), the effect > > > of the issue is just a possible delay of the connection closure until output is > > > produced, in which case typically ngx_http_write_filter() triggers the closure. > > > So the patch basically fixes a time delay (usually limited by a timeout). > > > IMO there's no need to go in so many details about that. > > > > The issue with static files demonstrates that the issue goes > > beyond use of cleanup handlers. As such, just skipping cleanup > > handlers with mr->blocked is wrong (see below). > > > > Also, it shows that the safeguard timeouts we use in > > ngx_http_writer() are actually useful, and save us from bigger > > problems here. In the past, there were attempts to remove these > > timers as long as an AIO operation is running. > > > > And the details provide ways to reproduce different aspects of the > > issue (and were actually used during testing and development of > > the patch). 
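For readers following the thread, the handler-side pattern under discussion
condenses to roughly the following (a sketch distilled from the hunks quoted
below, not a verbatim excerpt; r and c are the request and its connection):

    r->main->blocked--;
    r->aio = 0;

    if (r->main->terminated) {
        /*
         * the request was terminated while the operation was running;
         * the subrequest may already be cleaned up, so wake up the
         * connection instead and let ngx_http_request_finalizer() run
         */
        c->write->handler(c->write);

    } else {
        /* normal path: resume processing of the subrequest */
        r->write_event_handler(r);
        ngx_http_run_posted_requests(c);
    }
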
> > > > > > > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > > > --- a/src/http/ngx_http_copy_filter_module.c > > > > +++ b/src/http/ngx_http_copy_filter_module.c > > > > @@ -195,9 +195,18 @@ ngx_http_copy_aio_event_handler(ngx_even > > > > r->main->blocked--; > > > > r->aio = 0; > > > > > > > > - r->write_event_handler(r); > > > > + if (r->main->terminated) { > > > > + /* > > > > + * trigger connection event handler if the request was > > > > + * terminated > > > > + */ > > > > > > > > - ngx_http_run_posted_requests(c); > > > > + c->write->handler(c->write); > > > > + > > > > + } else { > > > > + r->write_event_handler(r); > > > > + ngx_http_run_posted_requests(c); > > > > + } > > > > } > > > > > > > > #endif > > > > @@ -305,11 +314,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > > > > > > > #endif > > > > > > > > - if (r->done) { > > > > + if (r->done || r->main->terminated) { > > > > /* > > > > * trigger connection event handler if the subrequest was > > > > - * already finalized; this can happen if the handler is used > > > > - * for sendfile() in threads > > > > + * already finalized (this can happen if the handler is used > > > > + * for sendfile() in threads), or if the request was terminated > > > > */ > > > > > > > > c->write->handler(c->write); > > > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > > > > --- a/src/http/ngx_http_file_cache.c > > > > +++ b/src/http/ngx_http_file_cache.c > > > > @@ -14,7 +14,7 @@ > > > > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > > > > ngx_http_cache_t *c); > > > > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > > > > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > > > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > > > ngx_http_cache_t *c); > > > > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > > > > ngx_http_cache_t *c); > > > > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > > > > static void > > > > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > > > > { > > > > + ngx_int_t rc; > > > > ngx_connection_t *c; > > > > ngx_http_request_t *r; > > > > > > > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > > > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > > > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > > > > > > > - ngx_http_file_cache_lock_wait(r, r->cache); > > > > - > > > > - ngx_http_run_posted_requests(c); > > > > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > > > > + > > > > + if (rc == NGX_AGAIN) { > > > > + return; > > > > + } > > > > + > > > > + r->cache->waiting = 0; > > > > + r->main->blocked--; > > > > + > > > > + if (r->main->terminated) { > > > > + /* > > > > + * trigger connection event handler if the request was > > > > + * terminated > > > > + */ > > > > + > > > > + c->write->handler(c->write); > > > > + > > > > + } else { > > > > + r->write_event_handler(r); > > > > + ngx_http_run_posted_requests(c); > > > > + } > > > > } > > > > > > BTW, cache lock is not a real aio. It's just a regular event timer. > > > And it's deleted in ngx_http_file_cache_free() which is called from > > > ngx_http_upstream_finalize_request(). So it looks like the "terminated" > > > flag will never be 1 here. > > > > The upstream cleanup handler is installed after checking the > > cache (and waiting for the cache lock, if needed). 
And that's, > > basically, why the code uses r->blocked in the first place. > > > > While the code can be rewritten to not depend on the r->blocked > > flag and so checking r->main->terminated won't be needed as well, > > this is not currently the case. > > > > > > > > > -static void > > > > +static ngx_int_t > > > > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > > > > { > > > > ngx_uint_t wait; > > > > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > > > > "cache lock timeout"); > > > > c->lock_timeout = 0; > > > > - goto wakeup; > > > > + return NGX_OK; > > > > } > > > > > > > > cache = c->file_cache; > > > > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > > > > > > if (wait) { > > > > ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer); > > > > - return; > > > > + return NGX_AGAIN; > > > > } > > > > > > > > -wakeup: > > > > - > > > > - c->waiting = 0; > > > > - r->main->blocked--; > > > > - r->write_event_handler(r); > > > > + return NGX_OK; > > > > } > > > > > > > > > > > > @@ -740,9 +755,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > > > > r->main->blocked--; > > > > r->aio = 0; > > > > > > > > - r->write_event_handler(r); > > > > - > > > > - ngx_http_run_posted_requests(c); > > > > + if (r->main->terminated) { > > > > + /* > > > > + * trigger connection event handler if the request was > > > > + * terminated > > > > + */ > > > > + > > > > + c->write->handler(c->write); > > > > + > > > > + } else { > > > > + r->write_event_handler(r); > > > > + ngx_http_run_posted_requests(c); > > > > + } > > > > } > > > > > > > > #endif > > > > @@ -810,9 +834,18 @@ ngx_http_cache_thread_event_handler(ngx_ > > > > r->main->blocked--; > > > > r->aio = 0; > > > > > > > > - r->write_event_handler(r); > > > > - > > > > - ngx_http_run_posted_requests(c); > > > > + if (r->main->terminated) { > > > > + /* > > > > + * trigger connection event handler if the request was > > > > + * terminated > > > > + */ > > > > + > > > > + c->write->handler(c->write); > > > > + > > > > + } else { > > > > + r->write_event_handler(r); > > > > + ngx_http_run_posted_requests(c); > > > > + } > > > > } > > > > > > > > #endif > > > > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > > > > --- a/src/http/ngx_http_request.c > > > > +++ b/src/http/ngx_http_request.c > > > > @@ -2681,6 +2681,8 @@ ngx_http_terminate_request(ngx_http_requ > > > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > > > > "http terminate request count:%d", mr->count); > > > > > > > > + mr->terminated = 1; > > > > > > Another solution could be skipping the cleanup handlers below if mr->blocked > > > is set. This would fix the crash, but would not fix the delay though. > > > > Exactly. While skipping cleanup handlers will fix a particular > > crash in the upstream module, the same issue observed with static > > files clearly demonstrates that this is a wrong approach. > > > > Further, skipping cleanup handlers means that we will continue > > doing unneeded (and potentially dangerous, given that request > > termination can be due to a fatal error, such as a memory > > allocation error) work we can otherwise cancel early by calling > > the cleanup handler. 
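(For context: these are the handlers modules register via
ngx_http_cleanup_add(); schematically, with a hypothetical handler name:)

    ngx_http_cleanup_t  *cln;

    cln = ngx_http_cleanup_add(r, 0);
    if (cln == NULL) {
        return NGX_ERROR;
    }

    /* invoked from ngx_http_terminate_request() and on normal request
     * finalization; my_module_cleanup is illustrative, not real code */
    cln->handler = my_module_cleanup;
    cln->data = r;
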
> > > > > > > > > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > > > > mr->headers_out.status = rc; > > > > } > > > > @@ -2703,8 +2705,13 @@ ngx_http_terminate_request(ngx_http_requ > > > > if (mr->write_event_handler) { > > > > > > > > if (mr->blocked) { > > > > + if (r != r->connection->data) { > > > > + r = r->connection->data; > > > > + } > > > > > > Why not simply r = r->connection->data. Or maybe a new variable > > > ar (active request) similar to mr (main request) would make sense. > > > > When writing this, I've decided that using conditional assignment > > better explains the idea that we need the active subrequest, and > > therefore switch to it if "r" is not active. Otherwise, just > > assignment is equivalent. > > > > Changed to just "r = r->connection->data;", as I have no strong > > preference here (and we already do "r = r->main;" in several > > places). > > > > The new variable looks like an overkill though. > > > > > > > > > + > > > > r->connection->error = 1; > > > > r->write_event_handler = ngx_http_request_finalizer; > > > > + > > > > return; > > > > } > > > > > > > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > > > > --- a/src/http/ngx_http_request.h > > > > +++ b/src/http/ngx_http_request.h > > > > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > > > > unsigned root_tested:1; > > > > unsigned done:1; > > > > unsigned logged:1; > > > > + unsigned terminated:1; > > > > > > > > unsigned buffered:4; > > > > > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > > > --- a/src/http/ngx_http_upstream.c > > > > +++ b/src/http/ngx_http_upstream.c > > > > @@ -3984,11 +3984,11 @@ ngx_http_upstream_thread_event_handler(n > > > > > > > > #endif > > > > > > > > - if (r->done) { > > > > + if (r->done || r->main->terminated) { > > > > /* > > > > * trigger connection event handler if the subrequest was > > > > - * already finalized; this can happen if the handler is used > > > > - * for sendfile() in threads > > > > + * already finalized (this can happen if the handler is used > > > > + * for sendfile() in threads), or if the request was terminated > > > > */ > > > > > > > > c->write->handler(c->write); > > > > > > The patch is generally ok. > > > > Just in case, here is the updated patch: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1706513520 -10800 > > # Mon Jan 29 10:32:00 2024 +0300 > > # Node ID 35bfb011f69bb97cf853b379fbdcfd5052d0e3ed > > # Parent e88cdaa0f1ffc9af3144770c72ee5baf07b2562e > > Fixed request termination with AIO and subrequests (ticket #2555). > > > > When a request was terminated due to an error via ngx_http_terminate_request() > > while an AIO operation was running in a subrequest, various issues were > > observed. This happened because ngx_http_request_finalizer() was only set > > in the subrequest where ngx_http_terminate_request() was called, but not > > in the subrequest where the AIO operation was running. After completion > > of the AIO operation normal processing of the subrequest was resumed, leading > > to issues. > > > > In particular, in case of the upstream module, termination of the request > > called upstream cleanup, which closed the upstream connection. Attempts to > > further work with the upstream connection after AIO operation completion > > resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file > > descriptor) while reading upstream" errors, or socket leaks. 
> > > > In ticket #2555, issues were observed with the following configuration > > with cache background update (with thread writing instrumented to > > introduce a delay, when a client closes the connection during an update): > > > > location = /background-and-aio-write { > > proxy_pass ... > > proxy_cache one; > > proxy_cache_valid 200 1s; > > proxy_cache_background_update on; > > proxy_cache_use_stale updating; > > aio threads; > > aio_write on; > > limit_rate 1000; > > } > > > > Similarly, the same issue can be seen with SSI, and can be caused by > > errors in subrequests, such as in the following configuration > > (where "/proxy" uses AIO, and "/sleep" returns 444 after some delay, > > causing request termination): > > > > location = /ssi-active-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > '; > > limit_rate 1000; > > } > > > > Or the same with both AIO operation and the error in non-active subrequests > > (which needs slightly different handling, see below): > > > > location = /ssi-non-active-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > > > '; > > limit_rate 1000; > > } > > > > Similarly, issues can be observed with just static files. However, > > with static files potential impact is limited due to timeout safeguards > > in ngx_http_writer(), and the fact that c->error is set during request > > termination. > > > > In a simple configuration with an AIO operation in the active subrequest, > > such as in the following configuration, the connection is closed right > > after completion of the AIO operation anyway, since ngx_http_writer() > > tries to write to the connection and fails due to c->error set: > > > > location = /ssi-active-static-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > '; > > limit_rate 1000; > > } > > > > In the following configuration, with an AIO operation in a non-active > > subrequest, the connection is closed only after send_timeout expires: > > > > location = /ssi-non-active-static-boom { > > ssi on; > > ssi_types *; > > return 200 ' > > > > > > > > '; > > limit_rate 1000; > > } > > > > Fix is to introduce r->main->terminated flag, which is to be checked > > by AIO event handlers when the r->main->blocked counter is decremented. > > When the flag is set, handlers are expected to wake up the connection > > instead of the subrequest (which might be already cleaned up). > > > > Additionally, now ngx_http_request_finalizer() is always set in the > > active subrequest, so waking up the connection properly finalizes the > > request even if termination happened in a non-active subrequest. 
> > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -208,9 +208,18 @@ ngx_http_copy_aio_event_handler(ngx_even > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > > > - ngx_http_run_posted_requests(c); > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > @@ -331,11 +340,11 @@ ngx_http_copy_thread_event_handler(ngx_e > > > > #endif > > > > - if (r->done) { > > + if (r->done || r->main->terminated) { > > /* > > * trigger connection event handler if the subrequest was > > - * already finalized; this can happen if the handler is used > > - * for sendfile() in threads > > + * already finalized (this can happen if the handler is used > > + * for sendfile() in threads), or if the request was terminated > > */ > > > > c->write->handler(c->write); > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > > --- a/src/http/ngx_http_file_cache.c > > +++ b/src/http/ngx_http_file_cache.c > > @@ -14,7 +14,7 @@ > > static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); > > -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, > > ngx_http_cache_t *c); > > @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques > > static void > > ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) > > { > > + ngx_int_t rc; > > ngx_connection_t *c; > > ngx_http_request_t *r; > > > > @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng > > ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, > > "http file cache wait: \"%V?%V\"", &r->uri, &r->args); > > > > - ngx_http_file_cache_lock_wait(r, r->cache); > > - > > - ngx_http_run_posted_requests(c); > > + rc = ngx_http_file_cache_lock_wait(r, r->cache); > > + > > + if (rc == NGX_AGAIN) { > > + return; > > + } > > + > > + r->cache->waiting = 0; > > + r->main->blocked--; > > + > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > > > -static void > > +static ngx_int_t > > ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) > > { > > ngx_uint_t wait; > > @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > > "cache lock timeout"); > > c->lock_timeout = 0; > > - goto wakeup; > > + return NGX_OK; > > } > > > > cache = c->file_cache; > > @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r > > > > if (wait) { > > ngx_add_timer(&c->wait_event, (timer > 500) ? 
500 : timer); > > - return; > > + return NGX_AGAIN; > > } > > > > -wakeup: > > - > > - c->waiting = 0; > > - r->main->blocked--; > > - r->write_event_handler(r); > > + return NGX_OK; > > } > > > > > > @@ -753,9 +768,18 @@ ngx_http_cache_aio_event_handler(ngx_eve > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > - > > - ngx_http_run_posted_requests(c); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > @@ -836,9 +860,18 @@ ngx_http_cache_thread_event_handler(ngx_ > > r->main->blocked--; > > r->aio = 0; > > > > - r->write_event_handler(r); > > - > > - ngx_http_run_posted_requests(c); > > + if (r->main->terminated) { > > + /* > > + * trigger connection event handler if the request was > > + * terminated > > + */ > > + > > + c->write->handler(c->write); > > + > > + } else { > > + r->write_event_handler(r); > > + ngx_http_run_posted_requests(c); > > + } > > } > > > > #endif > > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > > --- a/src/http/ngx_http_request.c > > +++ b/src/http/ngx_http_request.c > > @@ -2694,6 +2694,8 @@ ngx_http_terminate_request(ngx_http_requ > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > > "http terminate request count:%d", mr->count); > > > > + mr->terminated = 1; > > + > > if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { > > mr->headers_out.status = rc; > > } > > @@ -2716,8 +2718,11 @@ ngx_http_terminate_request(ngx_http_requ > > if (mr->write_event_handler) { > > > > if (mr->blocked) { > > + r = r->connection->data; > > + > > r->connection->error = 1; > > r->write_event_handler = ngx_http_request_finalizer; > > + > > return; > > } > > > > diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h > > --- a/src/http/ngx_http_request.h > > +++ b/src/http/ngx_http_request.h > > @@ -550,6 +550,7 @@ struct ngx_http_request_s { > > unsigned root_tested:1; > > unsigned done:1; > > unsigned logged:1; > > + unsigned terminated:1; > > > > unsigned buffered:4; > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3997,11 +3997,11 @@ ngx_http_upstream_thread_event_handler(n > > > > #endif > > > > - if (r->done) { > > + if (r->done || r->main->terminated) { > > /* > > * trigger connection event handler if the subrequest was > > - * already finalized; this can happen if the handler is used > > - * for sendfile() in threads > > + * already finalized (this can happen if the handler is used > > + * for sendfile() in threads), or if the request was terminated > > */ > > > > c->write->handler(c->write); > > > > -- > > Maxim Dounin > > http://mdounin.ru/ > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > Looks ok Pushed to http://mdounin.ru/hg/nginx, thanks for looking. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Jan 30 03:07:53 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Jan 2024 06:07:53 +0300 Subject: [PATCH 2 of 4] Upstream: fixed usage of closed sockets with filter finalization In-Reply-To: <3AC7E113-5762-4A69-B854-492ED0DE3002@nginx.com> References: <3AC7E113-5762-4A69-B854-492ED0DE3002@nginx.com> Message-ID: Hello! On Mon, Jan 29, 2024 at 05:21:43PM +0400, Sergey Kandaurov wrote: > > > On 29 Jan 2024, at 10:43, Maxim Dounin wrote: > > > > Hello! > > > > On Fri, Jan 26, 2024 at 04:26:00PM +0400, Sergey Kandaurov wrote: > > > >>> On 27 Nov 2023, at 06:50, Maxim Dounin wrote: > >>> > >>> # HG changeset patch > >>> # User Maxim Dounin > >>> # Date 1701049758 -10800 > >>> # Mon Nov 27 04:49:18 2023 +0300 > >>> # Node ID faf0b9defc76b8683af466f8a950c2c241382970 > >>> # Parent a5e39e9d1f4c84dcbe6a2f9e079372a3d63aef0b > >>> Upstream: fixed usage of closed sockets with filter finalization. > >>> > >>> When filter finalization is triggered when working with an upstream server, > >>> and error_page redirects request processing to some simple handler, > >>> ngx_http_request_finalize() triggers request termination when the response > >>> is sent. In particular, via the upstream cleanup handler, nginx will close > >>> the upstream connection and the corresponding socket. > >>> > >>> Still, this can happen to be with ngx_event_pipe() on stack. While > >>> the code will set p->downstream_error due to NGX_ERROR returned from the > >>> output filter chain by filter finalization, otherwise the error will be > >>> ignored till control returns to ngx_http_upstream_process_request(). > >>> And event pipe might try reading from the (already closed) socket, resulting > >>> in "readv() failed (9: Bad file descriptor) while reading upstream" errors > >>> (or even segfaults with SSL). > >>> > >>> Such errors were seen with the following configuration: > >>> > >>> location /t2 { > >>> proxy_pass http://127.0.0.1:8080/big; > >>> > >>> image_filter_buffer 10m; > >>> image_filter resize 150 100; > >>> error_page 415 = /empty; > >>> } > >>> > >>> location /empty { > >>> return 204; > >>> } > >>> > >>> location /big { > >>> # big enough static file > >>> } > >>> > >>> Fix is to set p->upstream_error in ngx_http_upstream_finalize_request(), > >>> so the existing checks in ngx_event_pipe_read_upstream() will prevent > >>> further reading from the closed upstream connection. > >>> > >>> Similarly, p->upstream_error is now checked when handling events at > >>> ngx_event_pipe() exit, as checking p->upstream->fd is not enough if > >>> keepalive upstream connections are being used and the connection was > >>> saved to cache during request termination. > >>> > >> > >> Setting p->upstream_error in ngx_http_upstream_finalize_request() > >> may look suspicious, because it is used to be set on connection errors > >> such as upstream timeout or recv error, or, as a recently introduced > >> exception in the fastcgi module, - also when the FastCGI record ends > >> prematurely, before receiving all the expected content. > >> But technically I think this is quite correct, because we no longer > >> want to receive further data, and also (and you mention this in the > >> commit log) this repeats closing an upstream connection socket in > >> the same place in ngx_http_upstream_finalize_request(). > >> So I think it should be fine. > > > > The biggest concern I personally see here is with the added > > p->upstream_error check at ngx_event_pipe() exit. 
If there is a > > real upstream error, such as when the connection is reset by the > > upstream server, and if we want the pipe to be active for some > > time (for example, if we want it to continue writing to the > > downstream connection), there will be no ngx_handle_read_event() > > call. For level-triggered event methods this means that the read > > event for the upstream connection will be generated again and > > again. > > > > This shouldn't be the problem for existing ngx_event_pipe() uses > > though, as p->upstream_error is anyway triggers > > ngx_http_upstream_finalize_request(). > > > > Still, we can consider introducing a separate flag, such as > > p->upstream_closed, or clearing p->upstream, and checking these in > > ngx_event_pipe() instead. This probably would be a more clear > > solution. > > > > Updated patch below: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1706510064 -10800 > > # Mon Jan 29 09:34:24 2024 +0300 > > # Node ID 4a91a03dcd8df0652884ed6ebe9f7437ce82fd26 > > # Parent 7b630f6487068f7cc9dd83762fb4ea39f2f340e9 > > Upstream: fixed usage of closed sockets with filter finalization. > > > > When filter finalization is triggered when working with an upstream server, > > and error_page redirects request processing to some simple handler, > > ngx_http_request_finalize() triggers request termination when the response > > is sent. In particular, via the upstream cleanup handler, nginx will close > > the upstream connection and the corresponding socket. > > > > Still, this can happen to be with ngx_event_pipe() on stack. While > > the code will set p->downstream_error due to NGX_ERROR returned from the > > output filter chain by filter finalization, otherwise the error will be > > ignored till control returns to ngx_http_upstream_process_request(). > > And event pipe might try reading from the (already closed) socket, resulting > > in "readv() failed (9: Bad file descriptor) while reading upstream" errors > > (or even segfaults with SSL). > > > > Such errors were seen with the following configuration: > > > > location /t2 { > > proxy_pass http://127.0.0.1:8080/big; > > > > image_filter_buffer 10m; > > image_filter resize 150 100; > > error_page 415 = /empty; > > } > > > > location /empty { > > return 204; > > } > > > > location /big { > > # big enough static file > > } > > > > Fix is to clear p->upstream in ngx_http_upstream_finalize_request(), > > and ensure that p->upstream is checked in ngx_event_pipe_read_upstream() > > and when handling events at ngx_event_pipe() exit. > > > > diff --git a/src/event/ngx_event_pipe.c b/src/event/ngx_event_pipe.c > > --- a/src/event/ngx_event_pipe.c > > +++ b/src/event/ngx_event_pipe.c > > @@ -57,7 +57,9 @@ ngx_event_pipe(ngx_event_pipe_t *p, ngx_ > > do_write = 1; > > } > > > > - if (p->upstream->fd != (ngx_socket_t) -1) { > > + if (p->upstream > > + && p->upstream->fd != (ngx_socket_t) -1) > > + { > > rev = p->upstream->read; > > > > flags = (rev->eof || rev->error) ? 
NGX_CLOSE_EVENT : 0; > > @@ -108,7 +110,9 @@ ngx_event_pipe_read_upstream(ngx_event_p > > ngx_msec_t delay; > > ngx_chain_t *chain, *cl, *ln; > > > > - if (p->upstream_eof || p->upstream_error || p->upstream_done) { > > + if (p->upstream_eof || p->upstream_error || p->upstream_done > > + || p->upstream == NULL) > > + { > > return NGX_OK; > > } > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -4561,6 +4561,10 @@ ngx_http_upstream_finalize_request(ngx_h > > > > u->peer.connection = NULL; > > > > + if (u->pipe) { > > + u->pipe->upstream = NULL; > > + } > > + > > if (u->pipe && u->pipe->temp_file) { > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > > "http upstream temp fd: %d", > > > > Indeed, this fix looks more isolated, I like it. Pushed to http://mdounin.ru/hg/nginx, thanks for the review. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Jan 30 03:28:32 2024 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Jan 2024 06:28:32 +0300 Subject: [PATCH] SSL: fixed $ssl_curves allocation error handling In-Reply-To: <2f70dd17c16461f833ea.1706265389@enoparse.local> References: <2f70dd17c16461f833ea.1706265389@enoparse.local> Message-ID: Hello! On Fri, Jan 26, 2024 at 02:36:29PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1706265240 -14400 > # Fri Jan 26 14:34:00 2024 +0400 > # Node ID 2f70dd17c16461f833eafec2dcf9193557bfb176 > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > SSL: fixed $ssl_curves allocation error handling. > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -5187,6 +5187,9 @@ ngx_ssl_get_curves(ngx_connection_t *c, > } > > curves = ngx_palloc(pool, n * sizeof(int)); > + if (curves == NULL) { > + return NGX_ERROR; > + } > > n = SSL_get1_curves(c->ssl->connection, curves); > len = 0; Looks good. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Jan 30 15:08:17 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:08:17 +0000 Subject: [nginx] Silenced complaints about socket leaks on forced termination. Message-ID: details: https://hg.nginx.org/nginx/rev/791ead216b03 branches: changeset: 9201:791ead216b03 user: Maxim Dounin date: Mon Jan 29 10:29:39 2024 +0300 description: Silenced complaints about socket leaks on forced termination. When graceful shutdown was requested, and then nginx was forced to do fast shutdown, it used to (incorrectly) complain about open sockets left in connections which weren't yet closed when fast shutdown was requested. Fix is to avoid complaining about open sockets when fast shutdown was requested after graceful one. Abnormal termination, if requested with the WINCH signal, can still happen though. 
diffstat: src/os/unix/ngx_process_cycle.c | 10 +++++----- src/os/win32/ngx_process_cycle.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diffs (40 lines): diff -r ee40e2b1d083 -r 791ead216b03 src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/os/unix/ngx_process_cycle.c Mon Jan 29 10:29:39 2024 +0300 @@ -948,7 +948,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc } } - if (ngx_exiting) { + if (ngx_exiting && !ngx_terminate) { c = cycle->connections; for (i = 0; i < cycle->connection_n; i++) { if (c[i].fd != -1 @@ -963,11 +963,11 @@ ngx_worker_process_exit(ngx_cycle_t *cyc ngx_debug_quit = 1; } } + } - if (ngx_debug_quit) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); - ngx_debug_point(); - } + if (ngx_debug_quit) { + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "aborting"); + ngx_debug_point(); } /* diff -r ee40e2b1d083 -r 791ead216b03 src/os/win32/ngx_process_cycle.c --- a/src/os/win32/ngx_process_cycle.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/os/win32/ngx_process_cycle.c Mon Jan 29 10:29:39 2024 +0300 @@ -834,7 +834,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc } } - if (ngx_exiting) { + if (ngx_exiting && !ngx_terminate) { c = cycle->connections; for (i = 0; i < cycle->connection_n; i++) { if (c[i].fd != (ngx_socket_t) -1 From pluknet at nginx.com Tue Jan 30 15:08:23 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:08:23 +0000 Subject: [nginx] Fixed request termination with AIO and subrequests (ticket #2555). Message-ID: details: https://hg.nginx.org/nginx/rev/0de20f43db25 branches: changeset: 9203:0de20f43db25 user: Maxim Dounin date: Tue Jan 30 03:20:05 2024 +0300 description: Fixed request termination with AIO and subrequests (ticket #2555). When a request was terminated due to an error via ngx_http_terminate_request() while an AIO operation was running in a subrequest, various issues were observed. This happened because ngx_http_request_finalizer() was only set in the subrequest where ngx_http_terminate_request() was called, but not in the subrequest where the AIO operation was running. After completion of the AIO operation normal processing of the subrequest was resumed, leading to issues. In particular, in case of the upstream module, termination of the request called upstream cleanup, which closed the upstream connection. Attempts to further work with the upstream connection after AIO operation completion resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file descriptor) while reading upstream" errors, or socket leaks. In ticket #2555, issues were observed with the following configuration with cache background update (with thread writing instrumented to introduce a delay, when a client closes the connection during an update): location = /background-and-aio-write { proxy_pass ... 
proxy_cache one; proxy_cache_valid 200 1s; proxy_cache_background_update on; proxy_cache_use_stale updating; aio threads; aio_write on; limit_rate 1000; } Similarly, the same issue can be seen with SSI, and can be caused by errors in subrequests, such as in the following configuration (where "/proxy" uses AIO, and "/sleep" returns 444 after some delay, causing request termination): location = /ssi-active-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Or the same with both AIO operation and the error in non-active subrequests (which needs slightly different handling, see below): location = /ssi-non-active-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Similarly, issues can be observed with just static files. However, with static files potential impact is limited due to timeout safeguards in ngx_http_writer(), and the fact that c->error is set during request termination. In a simple configuration with an AIO operation in the active subrequest, such as in the following configuration, the connection is closed right after completion of the AIO operation anyway, since ngx_http_writer() tries to write to the connection and fails due to c->error set: location = /ssi-active-static-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } In the following configuration, with an AIO operation in a non-active subrequest, the connection is closed only after send_timeout expires: location = /ssi-non-active-static-boom { ssi on; ssi_types *; return 200 ' '; limit_rate 1000; } Fix is to introduce r->main->terminated flag, which is to be checked by AIO event handlers when the r->main->blocked counter is decremented. When the flag is set, handlers are expected to wake up the connection instead of the subrequest (which might be already cleaned up). Additionally, now ngx_http_request_finalizer() is always set in the active subrequest, so waking up the connection properly finalizes the request even if termination happened in a non-active subrequest. 
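Schematically, the termination-side half of the fix reads as follows
(condensed from the src/http/ngx_http_request.c hunk below):

    /* in ngx_http_terminate_request(): mark the whole request tree */
    mr->terminated = 1;

    /* if a blocking operation is in flight, arm the finalizer on the
     * active subrequest (r->connection->data), not on the subrequest
     * where termination happened */
    if (mr->blocked) {
        r = r->connection->data;

        r->connection->error = 1;
        r->write_event_handler = ngx_http_request_finalizer;

        return;
    }
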
diffstat: src/http/ngx_http_copy_filter_module.c | 19 ++++++-- src/http/ngx_http_file_cache.c | 69 +++++++++++++++++++++++++-------- src/http/ngx_http_request.c | 5 ++ src/http/ngx_http_request.h | 1 + src/http/ngx_http_upstream.c | 6 +- 5 files changed, 74 insertions(+), 26 deletions(-) diffs (218 lines): diff -r e88cdaa0f1ff -r 0de20f43db25 src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Mon Jan 29 10:31:37 2024 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Tue Jan 30 03:20:05 2024 +0300 @@ -208,9 +208,18 @@ ngx_http_copy_aio_event_handler(ngx_even r->main->blocked--; r->aio = 0; - r->write_event_handler(r); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ - ngx_http_run_posted_requests(c); + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif @@ -331,11 +340,11 @@ ngx_http_copy_thread_event_handler(ngx_e #endif - if (r->done) { + if (r->done || r->main->terminated) { /* * trigger connection event handler if the subrequest was - * already finalized; this can happen if the handler is used - * for sendfile() in threads + * already finalized (this can happen if the handler is used + * for sendfile() in threads), or if the request was terminated */ c->write->handler(c->write); diff -r e88cdaa0f1ff -r 0de20f43db25 src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Mon Jan 29 10:31:37 2024 +0300 +++ b/src/http/ngx_http_file_cache.c Tue Jan 30 03:20:05 2024 +0300 @@ -14,7 +14,7 @@ static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r, ngx_http_cache_t *c); static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev); -static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r, +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c); static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, ngx_http_cache_t *c); @@ -463,6 +463,7 @@ ngx_http_file_cache_lock(ngx_http_reques static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev) { + ngx_int_t rc; ngx_connection_t *c; ngx_http_request_t *r; @@ -474,13 +475,31 @@ ngx_http_file_cache_lock_wait_handler(ng ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http file cache wait: \"%V?%V\"", &r->uri, &r->args); - ngx_http_file_cache_lock_wait(r, r->cache); - - ngx_http_run_posted_requests(c); + rc = ngx_http_file_cache_lock_wait(r, r->cache); + + if (rc == NGX_AGAIN) { + return; + } + + r->cache->waiting = 0; + r->main->blocked--; + + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } -static void +static ngx_int_t ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c) { ngx_uint_t wait; @@ -495,7 +514,7 @@ ngx_http_file_cache_lock_wait(ngx_http_r ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "cache lock timeout"); c->lock_timeout = 0; - goto wakeup; + return NGX_OK; } cache = c->file_cache; @@ -513,14 +532,10 @@ ngx_http_file_cache_lock_wait(ngx_http_r if (wait) { ngx_add_timer(&c->wait_event, (timer > 500) ? 
500 : timer); - return; + return NGX_AGAIN; } -wakeup: - - c->waiting = 0; - r->main->blocked--; - r->write_event_handler(r); + return NGX_OK; } @@ -753,9 +768,18 @@ ngx_http_cache_aio_event_handler(ngx_eve r->main->blocked--; r->aio = 0; - r->write_event_handler(r); - - ngx_http_run_posted_requests(c); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif @@ -836,9 +860,18 @@ ngx_http_cache_thread_event_handler(ngx_ r->main->blocked--; r->aio = 0; - r->write_event_handler(r); - - ngx_http_run_posted_requests(c); + if (r->main->terminated) { + /* + * trigger connection event handler if the request was + * terminated + */ + + c->write->handler(c->write); + + } else { + r->write_event_handler(r); + ngx_http_run_posted_requests(c); + } } #endif diff -r e88cdaa0f1ff -r 0de20f43db25 src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Mon Jan 29 10:31:37 2024 +0300 +++ b/src/http/ngx_http_request.c Tue Jan 30 03:20:05 2024 +0300 @@ -2694,6 +2694,8 @@ ngx_http_terminate_request(ngx_http_requ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http terminate request count:%d", mr->count); + mr->terminated = 1; + if (rc > 0 && (mr->headers_out.status == 0 || mr->connection->sent == 0)) { mr->headers_out.status = rc; } @@ -2716,8 +2718,11 @@ ngx_http_terminate_request(ngx_http_requ if (mr->write_event_handler) { if (mr->blocked) { + r = r->connection->data; + r->connection->error = 1; r->write_event_handler = ngx_http_request_finalizer; + return; } diff -r e88cdaa0f1ff -r 0de20f43db25 src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Mon Jan 29 10:31:37 2024 +0300 +++ b/src/http/ngx_http_request.h Tue Jan 30 03:20:05 2024 +0300 @@ -550,6 +550,7 @@ struct ngx_http_request_s { unsigned root_tested:1; unsigned done:1; unsigned logged:1; + unsigned terminated:1; unsigned buffered:4; diff -r e88cdaa0f1ff -r 0de20f43db25 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Mon Jan 29 10:31:37 2024 +0300 +++ b/src/http/ngx_http_upstream.c Tue Jan 30 03:20:05 2024 +0300 @@ -3997,11 +3997,11 @@ ngx_http_upstream_thread_event_handler(n #endif - if (r->done) { + if (r->done || r->main->terminated) { /* * trigger connection event handler if the subrequest was - * already finalized; this can happen if the handler is used - * for sendfile() in threads + * already finalized (this can happen if the handler is used + * for sendfile() in threads), or if the request was terminated */ c->write->handler(c->write); From pluknet at nginx.com Tue Jan 30 15:08:26 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:08:26 +0000 Subject: [nginx] Upstream: fixed usage of closed sockets with filter finalization. Message-ID: details: https://hg.nginx.org/nginx/rev/631ee3c6d38c branches: changeset: 9204:631ee3c6d38c user: Maxim Dounin date: Tue Jan 30 03:20:10 2024 +0300 description: Upstream: fixed usage of closed sockets with filter finalization. When filter finalization is triggered when working with an upstream server, and error_page redirects request processing to some simple handler, ngx_http_request_finalize() triggers request termination when the response is sent. In particular, via the upstream cleanup handler, nginx will close the upstream connection and the corresponding socket. Still, this can happen to be with ngx_event_pipe() on stack. 
While the code will set p->downstream_error due to NGX_ERROR returned from the output filter chain by filter finalization, otherwise the error will be ignored till control returns to ngx_http_upstream_process_request(). And event pipe might try reading from the (already closed) socket, resulting in "readv() failed (9: Bad file descriptor) while reading upstream" errors (or even segfaults with SSL). Such errors were seen with the following configuration: location /t2 { proxy_pass http://127.0.0.1:8080/big; image_filter_buffer 10m; image_filter resize 150 100; error_page 415 = /empty; } location /empty { return 204; } location /big { # big enough static file } Fix is to clear p->upstream in ngx_http_upstream_finalize_request(), and ensure that p->upstream is checked in ngx_event_pipe_read_upstream() and when handling events at ngx_event_pipe() exit. diffstat: src/event/ngx_event_pipe.c | 8 ++++++-- src/http/ngx_http_upstream.c | 4 ++++ 2 files changed, 10 insertions(+), 2 deletions(-) diffs (39 lines): diff -r 0de20f43db25 -r 631ee3c6d38c src/event/ngx_event_pipe.c --- a/src/event/ngx_event_pipe.c Tue Jan 30 03:20:05 2024 +0300 +++ b/src/event/ngx_event_pipe.c Tue Jan 30 03:20:10 2024 +0300 @@ -57,7 +57,9 @@ ngx_event_pipe(ngx_event_pipe_t *p, ngx_ do_write = 1; } - if (p->upstream->fd != (ngx_socket_t) -1) { + if (p->upstream + && p->upstream->fd != (ngx_socket_t) -1) + { rev = p->upstream->read; flags = (rev->eof || rev->error) ? NGX_CLOSE_EVENT : 0; @@ -108,7 +110,9 @@ ngx_event_pipe_read_upstream(ngx_event_p ngx_msec_t delay; ngx_chain_t *chain, *cl, *ln; - if (p->upstream_eof || p->upstream_error || p->upstream_done) { + if (p->upstream_eof || p->upstream_error || p->upstream_done + || p->upstream == NULL) + { return NGX_OK; } diff -r 0de20f43db25 -r 631ee3c6d38c src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Tue Jan 30 03:20:05 2024 +0300 +++ b/src/http/ngx_http_upstream.c Tue Jan 30 03:20:10 2024 +0300 @@ -4574,6 +4574,10 @@ ngx_http_upstream_finalize_request(ngx_h u->peer.connection = NULL; + if (u->pipe) { + u->pipe->upstream = NULL; + } + if (u->pipe && u->pipe->temp_file) { ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http upstream temp fd: %d", From pluknet at nginx.com Tue Jan 30 15:08:20 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:08:20 +0000 Subject: [nginx] AIO operations now add timers (ticket #2162). Message-ID: details: https://hg.nginx.org/nginx/rev/e88cdaa0f1ff branches: changeset: 9202:e88cdaa0f1ff user: Maxim Dounin date: Mon Jan 29 10:31:37 2024 +0300 description: AIO operations now add timers (ticket #2162). Each AIO (thread IO) operation being run is now accompanied with 1-minute timer. This timer prevents unexpected shutdown of the worker process while an AIO operation is running, and logs an alert if the operation is running for too long. This fixes "open socket left" alerts during worker processes shutdown due to pending AIO (or thread IO) operations while corresponding requests have no timers. In particular, such errors were observed while reading cache headers (ticket #2162), and with worker_shutdown_timeout. 
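The guard pattern, condensed from the hunks below: each call site that
submits an AIO or thread operation adds a 60-second timer, and each
completion handler checks and clears it.

    /* when submitting the operation: */
    ngx_add_timer(&task->event, 60000);

    r->main->blocked++;
    r->aio = 1;

    /* in the completion event handler: */
    if (ev->timedout) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "thread operation took too long");
        ev->timedout = 0;
        return;
    }

    if (ev->timer_set) {
        ngx_del_timer(ev);
    }
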
diffstat: src/http/ngx_http_copy_filter_module.c | 26 ++++++++++++++++++++++++++ src/http/ngx_http_file_cache.c | 26 ++++++++++++++++++++++++++ src/http/ngx_http_upstream.c | 13 +++++++++++++ src/os/unix/ngx_files.c | 4 ++++ src/os/unix/ngx_linux_sendfile_chain.c | 1 + 5 files changed, 70 insertions(+), 0 deletions(-) diffs (176 lines): diff -r 791ead216b03 -r e88cdaa0f1ff src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Mon Jan 29 10:29:39 2024 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Mon Jan 29 10:31:37 2024 +0300 @@ -170,6 +170,8 @@ ngx_http_copy_aio_handler(ngx_output_cha file->aio->data = r; file->aio->handler = ngx_http_copy_aio_event_handler; + ngx_add_timer(&file->aio->event, 60000); + r->main->blocked++; r->aio = 1; ctx->aio = 1; @@ -192,6 +194,17 @@ ngx_http_copy_aio_event_handler(ngx_even ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http aio: \"%V?%V\"", &r->uri, &r->args); + if (ev->timedout) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "aio operation took too long"); + ev->timedout = 0; + return; + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + r->main->blocked--; r->aio = 0; @@ -264,6 +277,8 @@ ngx_http_copy_thread_handler(ngx_thread_ return NGX_ERROR; } + ngx_add_timer(&task->event, 60000); + r->main->blocked++; r->aio = 1; @@ -288,6 +303,17 @@ ngx_http_copy_thread_event_handler(ngx_e ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http thread: \"%V?%V\"", &r->uri, &r->args); + if (ev->timedout) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "thread operation took too long"); + ev->timedout = 0; + return; + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + r->main->blocked--; r->aio = 0; diff -r 791ead216b03 -r e88cdaa0f1ff src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Mon Jan 29 10:29:39 2024 +0300 +++ b/src/http/ngx_http_file_cache.c Mon Jan 29 10:31:37 2024 +0300 @@ -690,6 +690,8 @@ ngx_http_file_cache_aio_read(ngx_http_re c->file.aio->data = r; c->file.aio->handler = ngx_http_cache_aio_event_handler; + ngx_add_timer(&c->file.aio->event, 60000); + r->main->blocked++; r->aio = 1; @@ -737,6 +739,17 @@ ngx_http_cache_aio_event_handler(ngx_eve ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http file cache aio: \"%V?%V\"", &r->uri, &r->args); + if (ev->timedout) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "aio operation took too long"); + ev->timedout = 0; + return; + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + r->main->blocked--; r->aio = 0; @@ -786,6 +799,8 @@ ngx_http_cache_thread_handler(ngx_thread return NGX_ERROR; } + ngx_add_timer(&task->event, 60000); + r->main->blocked++; r->aio = 1; @@ -807,6 +822,17 @@ ngx_http_cache_thread_event_handler(ngx_ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http file cache thread: \"%V?%V\"", &r->uri, &r->args); + if (ev->timedout) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "thread operation took too long"); + ev->timedout = 0; + return; + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + r->main->blocked--; r->aio = 0; diff -r 791ead216b03 -r e88cdaa0f1ff src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Mon Jan 29 10:29:39 2024 +0300 +++ b/src/http/ngx_http_upstream.c Mon Jan 29 10:31:37 2024 +0300 @@ -3949,6 +3949,8 @@ ngx_http_upstream_thread_handler(ngx_thr r->aio = 1; p->aio = 1; + ngx_add_timer(&task->event, 60000); + return NGX_OK; } @@ -3967,6 +3969,17 @@ ngx_http_upstream_thread_event_handler(n ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http upstream thread: \"%V?%V\"", &r->uri, &r->args); + if (ev->timedout) { 
+ ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "thread operation took too long"); + ev->timedout = 0; + return; + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + r->main->blocked--; r->aio = 0; diff -r 791ead216b03 -r e88cdaa0f1ff src/os/unix/ngx_files.c --- a/src/os/unix/ngx_files.c Mon Jan 29 10:29:39 2024 +0300 +++ b/src/os/unix/ngx_files.c Mon Jan 29 10:31:37 2024 +0300 @@ -110,6 +110,8 @@ ngx_thread_read(ngx_file_t *file, u_char return NGX_ERROR; } + task->event.log = file->log; + file->thread_task = task; } @@ -493,6 +495,8 @@ ngx_thread_write_chain_to_file(ngx_file_ return NGX_ERROR; } + task->event.log = file->log; + file->thread_task = task; } diff -r 791ead216b03 -r e88cdaa0f1ff src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c Mon Jan 29 10:29:39 2024 +0300 +++ b/src/os/unix/ngx_linux_sendfile_chain.c Mon Jan 29 10:31:37 2024 +0300 @@ -332,6 +332,7 @@ ngx_linux_sendfile_thread(ngx_connection return NGX_ERROR; } + task->event.log = c->log; task->handler = ngx_linux_sendfile_thread_handler; c->sendfile_task = task; From pluknet at nginx.com Tue Jan 30 15:18:25 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:18:25 +0000 Subject: [nginx] Year 2024. Message-ID: details: https://hg.nginx.org/nginx/rev/4a3aa287704f branches: changeset: 9205:4a3aa287704f user: Sergey Kandaurov date: Tue Jan 30 19:14:16 2024 +0400 description: Year 2024. diffstat: docs/text/LICENSE | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (11 lines): diff -r 631ee3c6d38c -r 4a3aa287704f docs/text/LICENSE --- a/docs/text/LICENSE Tue Jan 30 03:20:10 2024 +0300 +++ b/docs/text/LICENSE Tue Jan 30 19:14:16 2024 +0400 @@ -1,6 +1,6 @@ /* * Copyright (C) 2002-2021 Igor Sysoev - * Copyright (C) 2011-2023 Nginx, Inc. + * Copyright (C) 2011-2024 Nginx, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without From pluknet at nginx.com Tue Jan 30 15:18:55 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:18:55 +0000 Subject: [nginx] SSL: fixed $ssl_curves allocation error handling. Message-ID: details: https://hg.nginx.org/nginx/rev/43fc897bbab8 branches: changeset: 9206:43fc897bbab8 user: Sergey Kandaurov date: Tue Jan 30 19:18:31 2024 +0400 description: SSL: fixed $ssl_curves allocation error handling. diffstat: src/event/ngx_event_openssl.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diffs (13 lines): diff -r 4a3aa287704f -r 43fc897bbab8 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Jan 30 19:14:16 2024 +0400 +++ b/src/event/ngx_event_openssl.c Tue Jan 30 19:18:31 2024 +0400 @@ -5187,6 +5187,9 @@ ngx_ssl_get_curves(ngx_connection_t *c, } curves = ngx_palloc(pool, n * sizeof(int)); + if (curves == NULL) { + return NGX_ERROR; + } n = SSL_get1_curves(c->ssl->connection, curves); len = 0; From pluknet at nginx.com Tue Jan 30 15:22:15 2024 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 30 Jan 2024 15:22:15 +0000 Subject: [nginx] HTTP/3: added more compatibility checks for "listen ... quic". Message-ID: details: https://hg.nginx.org/nginx/rev/73eb75bee30f branches: changeset: 9207:73eb75bee30f user: Sergey Kandaurov date: Tue Jan 30 19:19:26 2024 +0400 description: HTTP/3: added more compatibility checks for "listen ... quic". Now "fastopen", "backlog", "accept_filter", "deferred", and "so_keepalive" parameters are not allowed with "quic" in the "listen" directive. Reported by Izorkin. 
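[Editorial note: for example, a "listen" directive along these lines
(a made-up illustration, not taken from the changeset) is now rejected
at configuration parse time, using the message strings added in the
diff below:

    listen 443 quic backlog=1024;

The rejected parameters apply to TCP listening sockets and have no
meaning for the UDP sockets used by QUIC.]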
diffstat: src/http/ngx_http_core_module.c | 36 +++++++++++++++++++++++++++++++----- 1 files changed, 31 insertions(+), 5 deletions(-) diffs (80 lines): diff -r 43fc897bbab8 -r 73eb75bee30f src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Tue Jan 30 19:18:31 2024 +0400 +++ b/src/http/ngx_http_core_module.c Tue Jan 30 19:19:26 2024 +0400 @@ -3961,7 +3961,7 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx ngx_str_t *value, size; ngx_url_t u; - ngx_uint_t n, i; + ngx_uint_t n, i, backlog; ngx_http_listen_opt_t lsopt; cscf->listen = 1; @@ -4000,6 +4000,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx lsopt.ipv6only = 1; #endif + backlog = 0; + for (n = 2; n < cf->args->nelts; n++) { if (ngx_strcmp(value[n].data, "default_server") == 0 @@ -4058,6 +4060,8 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx return NGX_CONF_ERROR; } + backlog = 1; + continue; } @@ -4305,9 +4309,29 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx return NGX_CONF_ERROR; } -#if (NGX_HTTP_V3) - if (lsopt.quic) { +#if (NGX_HAVE_TCP_FASTOPEN) + if (lsopt.fastopen != -1) { + return "\"fastopen\" parameter is incompatible with \"quic\""; + } +#endif + + if (backlog) { + return "\"backlog\" parameter is incompatible with \"quic\""; + } + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + if (lsopt.accept_filter) { + return "\"accept_filter\" parameter is incompatible with \"quic\""; + } +#endif + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + if (lsopt.deferred_accept) { + return "\"deferred\" parameter is incompatible with \"quic\""; + } +#endif + #if (NGX_HTTP_SSL) if (lsopt.ssl) { return "\"ssl\" parameter is incompatible with \"quic\""; @@ -4320,13 +4344,15 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx } #endif + if (lsopt.so_keepalive) { + return "\"so_keepalive\" parameter is incompatible with \"quic\""; + } + if (lsopt.proxy_protocol) { return "\"proxy_protocol\" parameter is incompatible with \"quic\""; } } -#endif - for (n = 0; n < u.naddrs; n++) { for (i = 0; i < n; i++) {
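[Editorial note: with this change applied, testing such a configuration
fails at startup with an error along these lines (the configuration
path and line number here are hypothetical):

    nginx: [emerg] "backlog" parameter is incompatible with "quic"
    in /etc/nginx/nginx.conf:5

rather than the listening socket being created with a conflicting
parameter.]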